^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) * Copyright (c) 2001 The Regents of the University of Michigan.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * All rights reserved.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * Kendrick Smith <kmsmith@umich.edu>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) * Andy Adamson <kandros@umich.edu>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) * Redistribution and use in source and binary forms, with or without
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) * modification, are permitted provided that the following conditions
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) * are met:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) * 1. Redistributions of source code must retain the above copyright
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) * notice, this list of conditions and the following disclaimer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) * 2. Redistributions in binary form must reproduce the above copyright
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) * notice, this list of conditions and the following disclaimer in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) * documentation and/or other materials provided with the distribution.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) * 3. Neither the name of the University nor the names of its
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) * contributors may be used to endorse or promote products derived
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) * from this software without specific prior written permission.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) #include <linux/file.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) #include <linux/fs.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) #include <linux/namei.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) #include <linux/swap.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) #include <linux/pagemap.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) #include <linux/ratelimit.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) #include <linux/sunrpc/svcauth_gss.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) #include <linux/sunrpc/addr.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) #include <linux/jhash.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) #include <linux/string_helpers.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) #include "xdr4.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47) #include "xdr4cb.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) #include "vfs.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) #include "current_stateid.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) #include "netns.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) #include "pnfs.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53) #include "filecache.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54) #include "trace.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56) #define NFSDDBG_FACILITY NFSDDBG_PROC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58) #define all_ones {{~0,~0},~0}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59) static const stateid_t one_stateid = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60) .si_generation = ~0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) .si_opaque = all_ones,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) static const stateid_t zero_stateid = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) /* all fields zero */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66) static const stateid_t currentstateid = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) .si_generation = 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) static const stateid_t close_stateid = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) .si_generation = 0xffffffffU,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) static u64 current_sessionid = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) #define ZERO_STATEID(stateid) (!memcmp((stateid), &zero_stateid, sizeof(stateid_t)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76) #define ONE_STATEID(stateid) (!memcmp((stateid), &one_stateid, sizeof(stateid_t)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77) #define CURRENT_STATEID(stateid) (!memcmp((stateid), ¤tstateid, sizeof(stateid_t)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) #define CLOSE_STATEID(stateid) (!memcmp((stateid), &close_stateid, sizeof(stateid_t)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) /* forward declarations */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81) static bool check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82) static void nfs4_free_ol_stateid(struct nfs4_stid *stid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83) void nfsd4_end_grace(struct nfsd_net *nn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) static void _free_cpntf_state_locked(struct nfsd_net *nn, struct nfs4_cpntf_state *cps);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) /* Locking: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89) * Currently used for the del_recall_lru and file hash table. In an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) * effort to decrease the scope of the client_mutex, this spinlock may
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91) * eventually cover more:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) static DEFINE_SPINLOCK(state_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95) enum nfsd4_st_mutex_lock_subclass {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96) OPEN_STATEID_MUTEX = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97) LOCK_STATEID_MUTEX = 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) * A waitqueue for all in-progress 4.0 CLOSE operations that are waiting for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) * the refcount on the open stateid to drop.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) static DECLARE_WAIT_QUEUE_HEAD(close_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) * A waitqueue where a writer to clients/#/ctl destroying a client can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) * wait for cl_rpc_users to drop to 0 and then for the client to be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) * unhashed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) static DECLARE_WAIT_QUEUE_HEAD(expiry_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) static struct kmem_cache *client_slab;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) static struct kmem_cache *openowner_slab;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) static struct kmem_cache *lockowner_slab;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) static struct kmem_cache *file_slab;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) static struct kmem_cache *stateid_slab;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) static struct kmem_cache *deleg_slab;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) static struct kmem_cache *odstate_slab;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) static void free_session(struct nfsd4_session *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) static const struct nfsd4_callback_ops nfsd4_cb_recall_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) static const struct nfsd4_callback_ops nfsd4_cb_notify_lock_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) static bool is_session_dead(struct nfsd4_session *ses)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) return ses->se_flags & NFS4_SESSION_DEAD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) static __be32 mark_session_dead_locked(struct nfsd4_session *ses, int ref_held_by_me)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) if (atomic_read(&ses->se_ref) > ref_held_by_me)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) return nfserr_jukebox;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) ses->se_flags |= NFS4_SESSION_DEAD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) return nfs_ok;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) static bool is_client_expired(struct nfs4_client *clp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) return clp->cl_time == 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) static __be32 get_client_locked(struct nfs4_client *clp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) lockdep_assert_held(&nn->client_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) if (is_client_expired(clp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) return nfserr_expired;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) atomic_inc(&clp->cl_rpc_users);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) return nfs_ok;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) /* must be called under the client_lock */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) static inline void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) renew_client_locked(struct nfs4_client *clp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) if (is_client_expired(clp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) WARN_ON(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) printk("%s: client (clientid %08x/%08x) already expired\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) clp->cl_clientid.cl_boot,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) clp->cl_clientid.cl_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) list_move_tail(&clp->cl_lru, &nn->client_lru);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) clp->cl_time = ktime_get_boottime_seconds();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) static void put_client_renew_locked(struct nfs4_client *clp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) lockdep_assert_held(&nn->client_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) if (!atomic_dec_and_test(&clp->cl_rpc_users))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) if (!is_client_expired(clp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) renew_client_locked(clp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) wake_up_all(&expiry_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) static void put_client_renew(struct nfs4_client *clp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) if (!atomic_dec_and_lock(&clp->cl_rpc_users, &nn->client_lock))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) if (!is_client_expired(clp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) renew_client_locked(clp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) wake_up_all(&expiry_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) spin_unlock(&nn->client_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) static __be32 nfsd4_get_session_locked(struct nfsd4_session *ses)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) __be32 status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) if (is_session_dead(ses))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) return nfserr_badsession;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) status = get_client_locked(ses->se_client);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) if (status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) atomic_inc(&ses->se_ref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) return nfs_ok;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) static void nfsd4_put_session_locked(struct nfsd4_session *ses)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) struct nfs4_client *clp = ses->se_client;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) lockdep_assert_held(&nn->client_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) if (atomic_dec_and_test(&ses->se_ref) && is_session_dead(ses))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) free_session(ses);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) put_client_renew_locked(clp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) static void nfsd4_put_session(struct nfsd4_session *ses)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) struct nfs4_client *clp = ses->se_client;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) spin_lock(&nn->client_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) nfsd4_put_session_locked(ses);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) spin_unlock(&nn->client_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) static struct nfsd4_blocked_lock *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) find_blocked_lock(struct nfs4_lockowner *lo, struct knfsd_fh *fh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) struct nfsd_net *nn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) struct nfsd4_blocked_lock *cur, *found = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) spin_lock(&nn->blocked_locks_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) list_for_each_entry(cur, &lo->lo_blocked, nbl_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) if (fh_match(fh, &cur->nbl_fh)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) list_del_init(&cur->nbl_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) list_del_init(&cur->nbl_lru);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) found = cur;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252) spin_unlock(&nn->blocked_locks_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) if (found)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254) locks_delete_block(&found->nbl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255) return found;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258) static struct nfsd4_blocked_lock *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) find_or_allocate_block(struct nfs4_lockowner *lo, struct knfsd_fh *fh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260) struct nfsd_net *nn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262) struct nfsd4_blocked_lock *nbl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264) nbl = find_blocked_lock(lo, fh, nn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265) if (!nbl) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) nbl= kmalloc(sizeof(*nbl), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) if (nbl) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) INIT_LIST_HEAD(&nbl->nbl_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) INIT_LIST_HEAD(&nbl->nbl_lru);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) fh_copy_shallow(&nbl->nbl_fh, fh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271) locks_init_lock(&nbl->nbl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) nfsd4_init_cb(&nbl->nbl_cb, lo->lo_owner.so_client,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273) &nfsd4_cb_notify_lock_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274) NFSPROC4_CLNT_CB_NOTIFY_LOCK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277) return nbl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281) free_blocked_lock(struct nfsd4_blocked_lock *nbl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) locks_delete_block(&nbl->nbl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284) locks_release_private(&nbl->nbl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) kfree(nbl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289) remove_blocked_locks(struct nfs4_lockowner *lo)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291) struct nfs4_client *clp = lo->lo_owner.so_client;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292) struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293) struct nfsd4_blocked_lock *nbl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294) LIST_HEAD(reaplist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296) /* Dequeue all blocked locks */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297) spin_lock(&nn->blocked_locks_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298) while (!list_empty(&lo->lo_blocked)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299) nbl = list_first_entry(&lo->lo_blocked,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300) struct nfsd4_blocked_lock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301) nbl_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302) list_del_init(&nbl->nbl_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303) list_move(&nbl->nbl_lru, &reaplist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305) spin_unlock(&nn->blocked_locks_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307) /* Now free them */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308) while (!list_empty(&reaplist)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309) nbl = list_first_entry(&reaplist, struct nfsd4_blocked_lock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310) nbl_lru);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311) list_del_init(&nbl->nbl_lru);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312) free_blocked_lock(nbl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317) nfsd4_cb_notify_lock_prepare(struct nfsd4_callback *cb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319) struct nfsd4_blocked_lock *nbl = container_of(cb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320) struct nfsd4_blocked_lock, nbl_cb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321) locks_delete_block(&nbl->nbl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325) nfsd4_cb_notify_lock_done(struct nfsd4_callback *cb, struct rpc_task *task)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328) * Since this is just an optimization, we don't try very hard if it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329) * turns out not to succeed. We'll requeue it on NFS4ERR_DELAY, and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330) * just quit trying on anything else.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332) switch (task->tk_status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333) case -NFS4ERR_DELAY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334) rpc_delay(task, 1 * HZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342) nfsd4_cb_notify_lock_release(struct nfsd4_callback *cb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344) struct nfsd4_blocked_lock *nbl = container_of(cb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345) struct nfsd4_blocked_lock, nbl_cb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347) free_blocked_lock(nbl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350) static const struct nfsd4_callback_ops nfsd4_cb_notify_lock_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351) .prepare = nfsd4_cb_notify_lock_prepare,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352) .done = nfsd4_cb_notify_lock_done,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353) .release = nfsd4_cb_notify_lock_release,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356) static inline struct nfs4_stateowner *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357) nfs4_get_stateowner(struct nfs4_stateowner *sop)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359) atomic_inc(&sop->so_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360) return sop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364) same_owner_str(struct nfs4_stateowner *sop, struct xdr_netobj *owner)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366) return (sop->so_owner.len == owner->len) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367) 0 == memcmp(sop->so_owner.data, owner->data, owner->len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370) static struct nfs4_openowner *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371) find_openstateowner_str_locked(unsigned int hashval, struct nfsd4_open *open,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372) struct nfs4_client *clp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374) struct nfs4_stateowner *so;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376) lockdep_assert_held(&clp->cl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378) list_for_each_entry(so, &clp->cl_ownerstr_hashtbl[hashval],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379) so_strhash) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380) if (!so->so_is_open_owner)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382) if (same_owner_str(so, &open->op_owner))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383) return openowner(nfs4_get_stateowner(so));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388) static struct nfs4_openowner *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389) find_openstateowner_str(unsigned int hashval, struct nfsd4_open *open,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390) struct nfs4_client *clp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392) struct nfs4_openowner *oo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394) spin_lock(&clp->cl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395) oo = find_openstateowner_str_locked(hashval, open, clp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396) spin_unlock(&clp->cl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397) return oo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400) static inline u32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401) opaque_hashval(const void *ptr, int nbytes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403) unsigned char *cptr = (unsigned char *) ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405) u32 x = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406) while (nbytes--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407) x *= 37;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408) x += *cptr++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410) return x;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413) static void nfsd4_free_file_rcu(struct rcu_head *rcu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415) struct nfs4_file *fp = container_of(rcu, struct nfs4_file, fi_rcu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417) kmem_cache_free(file_slab, fp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421) put_nfs4_file(struct nfs4_file *fi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423) might_lock(&state_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425) if (refcount_dec_and_lock(&fi->fi_ref, &state_lock)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426) hlist_del_rcu(&fi->fi_hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427) spin_unlock(&state_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428) WARN_ON_ONCE(!list_empty(&fi->fi_clnt_odstate));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429) WARN_ON_ONCE(!list_empty(&fi->fi_delegations));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430) call_rcu(&fi->fi_rcu, nfsd4_free_file_rcu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434) static struct nfsd_file *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) __nfs4_get_fd(struct nfs4_file *f, int oflag)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437) if (f->fi_fds[oflag])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438) return nfsd_file_get(f->fi_fds[oflag]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442) static struct nfsd_file *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443) find_writeable_file_locked(struct nfs4_file *f)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445) struct nfsd_file *ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447) lockdep_assert_held(&f->fi_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) ret = __nfs4_get_fd(f, O_WRONLY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) ret = __nfs4_get_fd(f, O_RDWR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) static struct nfsd_file *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) find_writeable_file(struct nfs4_file *f)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) struct nfsd_file *ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) spin_lock(&f->fi_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) ret = find_writeable_file_locked(f);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) spin_unlock(&f->fi_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) static struct nfsd_file *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) find_readable_file_locked(struct nfs4_file *f)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) struct nfsd_file *ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) lockdep_assert_held(&f->fi_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) ret = __nfs4_get_fd(f, O_RDONLY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) ret = __nfs4_get_fd(f, O_RDWR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) static struct nfsd_file *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) find_readable_file(struct nfs4_file *f)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) struct nfsd_file *ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) spin_lock(&f->fi_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) ret = find_readable_file_locked(f);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) spin_unlock(&f->fi_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) struct nfsd_file *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) find_any_file(struct nfs4_file *f)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) struct nfsd_file *ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) if (!f)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) spin_lock(&f->fi_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) ret = __nfs4_get_fd(f, O_RDWR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) if (!ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) ret = __nfs4_get_fd(f, O_WRONLY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) ret = __nfs4_get_fd(f, O_RDONLY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) spin_unlock(&f->fi_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) static struct nfsd_file *find_deleg_file(struct nfs4_file *f)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) struct nfsd_file *ret = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) spin_lock(&f->fi_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) if (f->fi_deleg_file)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) ret = nfsd_file_get(f->fi_deleg_file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) spin_unlock(&f->fi_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520)
/* Server-wide delegation accounting; see alloc_init_deleg()/nfs4_free_deleg(). */
static atomic_long_t num_delegations;
unsigned long max_delegations;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) * Open owner state (share locks)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) /* hash tables for lock and open owners */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) #define OWNER_HASH_BITS 8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) #define OWNER_HASH_SIZE (1 << OWNER_HASH_BITS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) #define OWNER_HASH_MASK (OWNER_HASH_SIZE - 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) static unsigned int ownerstr_hashval(struct xdr_netobj *ownername)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) unsigned int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) ret = opaque_hashval(ownername->data, ownername->len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) return ret & OWNER_HASH_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) /* hash table for nfs4_file */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) #define FILE_HASH_BITS 8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) #define FILE_HASH_SIZE (1 << FILE_HASH_BITS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) static unsigned int nfsd_fh_hashval(struct knfsd_fh *fh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) return jhash2(fh->fh_base.fh_pad, XDR_QUADLEN(fh->fh_size), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) static unsigned int file_hashval(struct knfsd_fh *fh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) return nfsd_fh_hashval(fh) & (FILE_HASH_SIZE - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554)
/* Hash table of nfs4_file, indexed by file_hashval(); removal under state_lock (see put_nfs4_file()). */
static struct hlist_head file_hashtbl[FILE_HASH_SIZE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556)
/*
 * Bump @fp's per-mode access counters for the NFS4_SHARE_ACCESS_*
 * bits set in @access.  Caller must hold fi_lock.
 */
static void
__nfs4_file_get_access(struct nfs4_file *fp, u32 access)
{
	lockdep_assert_held(&fp->fi_lock);

	if (access & NFS4_SHARE_ACCESS_WRITE)
		atomic_inc(&fp->fi_access[O_WRONLY]);
	if (access & NFS4_SHARE_ACCESS_READ)
		atomic_inc(&fp->fi_access[O_RDONLY]);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) static __be32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) nfs4_file_get_access(struct nfs4_file *fp, u32 access)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) lockdep_assert_held(&fp->fi_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) /* Does this access mode make sense? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) if (access & ~NFS4_SHARE_ACCESS_BOTH)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) return nfserr_inval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) /* Does it conflict with a deny mode already set? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) if ((access & fp->fi_share_deny) != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) return nfserr_share_denied;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) __nfs4_file_get_access(fp, access);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) return nfs_ok;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) static __be32 nfs4_file_check_deny(struct nfs4_file *fp, u32 deny)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) /* Common case is that there is no deny mode. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) if (deny) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) /* Does this deny mode make sense? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) if (deny & ~NFS4_SHARE_DENY_BOTH)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) return nfserr_inval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) if ((deny & NFS4_SHARE_DENY_READ) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) atomic_read(&fp->fi_access[O_RDONLY]))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) return nfserr_share_denied;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) if ((deny & NFS4_SHARE_DENY_WRITE) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) atomic_read(&fp->fi_access[O_WRONLY]))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) return nfserr_share_denied;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) return nfs_ok;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603)
/*
 * Drop one reference on @fp's access count for @oflag (O_RDONLY or
 * O_WRONLY).  When the count hits zero, the cached descriptor for that
 * mode is released; if the other mode's count is also zero, the O_RDWR
 * descriptor is released as well.  The nfsd_file_put() calls happen
 * after fi_lock is dropped.
 */
static void __nfs4_file_put_access(struct nfs4_file *fp, int oflag)
{
	might_lock(&fp->fi_lock);

	if (atomic_dec_and_lock(&fp->fi_access[oflag], &fp->fi_lock)) {
		struct nfsd_file *f1 = NULL;
		struct nfsd_file *f2 = NULL;

		/* Steal the per-mode pointer while holding the lock. */
		swap(f1, fp->fi_fds[oflag]);
		/* 1 - oflag maps O_RDONLY <-> O_WRONLY, i.e. the "other" mode. */
		if (atomic_read(&fp->fi_access[1 - oflag]) == 0)
			swap(f2, fp->fi_fds[O_RDWR]);
		spin_unlock(&fp->fi_lock);
		if (f1)
			nfsd_file_put(f1);
		if (f2)
			nfsd_file_put(f2);
	}
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622)
/*
 * Release the access references described by the NFS4_SHARE_ACCESS_*
 * bits in @access; counterpart of __nfs4_file_get_access().
 */
static void nfs4_file_put_access(struct nfs4_file *fp, u32 access)
{
	WARN_ON_ONCE(access & ~NFS4_SHARE_ACCESS_BOTH);

	if (access & NFS4_SHARE_ACCESS_WRITE)
		__nfs4_file_put_access(fp, O_WRONLY);
	if (access & NFS4_SHARE_ACCESS_READ)
		__nfs4_file_put_access(fp, O_RDONLY);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) * Allocate a new open/delegation state counter. This is needed for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) * pNFS for proper return on close semantics.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) * Note that we only allocate it for pNFS-enabled exports, otherwise
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) * all pointers to struct nfs4_clnt_odstate are always NULL.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) static struct nfs4_clnt_odstate *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) alloc_clnt_odstate(struct nfs4_client *clp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) struct nfs4_clnt_odstate *co;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) co = kmem_cache_zalloc(odstate_slab, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) if (co) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) co->co_client = clp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) refcount_set(&co->co_odcount, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) return co;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) hash_clnt_odstate_locked(struct nfs4_clnt_odstate *co)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) struct nfs4_file *fp = co->co_file;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) lockdep_assert_held(&fp->fi_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) list_add(&co->co_perfile, &fp->fi_clnt_odstate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) static inline void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) get_clnt_odstate(struct nfs4_clnt_odstate *co)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) if (co)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) refcount_inc(&co->co_odcount);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668)
/*
 * Drop a reference on @co (may be NULL).  On the final put, the
 * odstate is unlinked from its file under fi_lock, the client's
 * layouts on that file are returned, and the structure is freed.
 */
static void
put_clnt_odstate(struct nfs4_clnt_odstate *co)
{
	struct nfs4_file *fp;

	if (!co)
		return;

	fp = co->co_file;
	if (refcount_dec_and_lock(&co->co_odcount, &fp->fi_lock)) {
		list_del(&co->co_perfile);
		spin_unlock(&fp->fi_lock);

		nfsd4_return_all_file_layouts(co->co_client, fp);
		kmem_cache_free(odstate_slab, co);
	}
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686)
/*
 * Look up the odstate for @new's client on @fp, or hash @new if that
 * client has none yet.  Returns the existing entry (with an extra
 * reference taken) or @new once it has been attached to @fp; returns
 * NULL when @new is NULL.  Note @new is left untouched when an
 * existing entry is found, so the caller can still free it.
 */
static struct nfs4_clnt_odstate *
find_or_hash_clnt_odstate(struct nfs4_file *fp, struct nfs4_clnt_odstate *new)
{
	struct nfs4_clnt_odstate *co;
	struct nfs4_client *cl;

	if (!new)
		return NULL;

	cl = new->co_client;

	spin_lock(&fp->fi_lock);
	list_for_each_entry(co, &fp->fi_clnt_odstate, co_perfile) {
		if (co->co_client == cl) {
			/* Client already has an odstate on this file: reuse it. */
			get_clnt_odstate(co);
			goto out;
		}
	}
	co = new;
	co->co_file = fp;
	hash_clnt_odstate_locked(new);
out:
	spin_unlock(&fp->fi_lock);
	return co;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712)
/*
 * Allocate a generic stateid from @slab on behalf of client @cl.
 *
 * @sc_free is the destructor invoked when the last reference is put.
 * An id is taken cyclically from the client's stateid idr starting at
 * 1, and the refcount starts at 1.  Returns NULL if either the slab
 * allocation or the id allocation fails.
 */
struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, struct kmem_cache *slab,
				  void (*sc_free)(struct nfs4_stid *))
{
	struct nfs4_stid *stid;
	int new_id;

	stid = kmem_cache_zalloc(slab, GFP_KERNEL);
	if (!stid)
		return NULL;

	idr_preload(GFP_KERNEL);
	spin_lock(&cl->cl_lock);
	/* Reserving 0 for start of file in nfsdfs "states" file: */
	new_id = idr_alloc_cyclic(&cl->cl_stateids, stid, 1, 0, GFP_NOWAIT);
	spin_unlock(&cl->cl_lock);
	idr_preload_end();
	if (new_id < 0)
		goto out_free;

	stid->sc_free = sc_free;
	stid->sc_client = cl;
	stid->sc_stateid.si_opaque.so_id = new_id;
	stid->sc_stateid.si_opaque.so_clid = cl->cl_clientid;
	/* Will be incremented before return to client: */
	refcount_set(&stid->sc_count, 1);
	spin_lock_init(&stid->sc_lock);
	INIT_LIST_HEAD(&stid->sc_cp_list);

	/*
	 * It shouldn't be a problem to reuse an opaque stateid value.
	 * I don't think it is for 4.1.  But with 4.0 I worry that, for
	 * example, a stray write retransmission could be accepted by
	 * the server when it should have been rejected.  Therefore,
	 * adopt a trick from the sctp code to attempt to maximize the
	 * amount of time until an id is reused, by ensuring they always
	 * "increase" (mod INT_MAX):
	 */
	return stid;
out_free:
	kmem_cache_free(slab, stid);
	return NULL;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755)
/*
 * Create a unique stateid_t to represent each COPY (or COPY_NOTIFY)
 * and register it in the per-net s2s_cp_stateids idr.
 *
 * Returns 1 on success, 0 if no id could be allocated.
 */
static int nfs4_init_cp_state(struct nfsd_net *nn, copy_stateid_t *stid,
		unsigned char sc_type)
{
	int new_id;

	stid->stid.si_opaque.so_clid.cl_boot = (u32)nn->boot_time;
	stid->stid.si_opaque.so_clid.cl_id = nn->s2s_cp_cl_id;
	stid->sc_type = sc_type;

	idr_preload(GFP_KERNEL);
	spin_lock(&nn->s2s_cp_lock);
	new_id = idr_alloc_cyclic(&nn->s2s_cp_stateids, stid, 0, 0, GFP_NOWAIT);
	stid->stid.si_opaque.so_id = new_id;
	stid->stid.si_generation = 1;
	spin_unlock(&nn->s2s_cp_lock);
	idr_preload_end();
	if (new_id < 0)
		return 0;
	return 1;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) int nfs4_init_copy_state(struct nfsd_net *nn, struct nfsd4_copy *copy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) return nfs4_init_cp_state(nn, ©->cp_stateid, NFS4_COPY_STID);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784)
/*
 * Allocate a COPY_NOTIFY state tied to the parent stateid @p_stid,
 * register it in the per-net idr, and link it on the parent's
 * sc_cp_list.  Returns NULL on allocation or id failure.
 */
struct nfs4_cpntf_state *nfs4_alloc_init_cpntf_state(struct nfsd_net *nn,
		struct nfs4_stid *p_stid)
{
	struct nfs4_cpntf_state *cps;

	cps = kzalloc(sizeof(struct nfs4_cpntf_state), GFP_KERNEL);
	if (!cps)
		return NULL;
	cps->cpntf_time = ktime_get_boottime_seconds();
	refcount_set(&cps->cp_stateid.sc_count, 1);
	if (!nfs4_init_cp_state(nn, &cps->cp_stateid, NFS4_COPYNOTIFY_STID))
		goto out_free;
	spin_lock(&nn->s2s_cp_lock);
	list_add(&cps->cp_list, &p_stid->sc_cp_list);
	spin_unlock(&nn->s2s_cp_lock);
	return cps;
out_free:
	kfree(cps);
	return NULL;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805)
/*
 * Remove a COPY stateid from the per-net idr.  Warns if @copy does not
 * carry an NFS4_COPY_STID stateid.
 */
void nfs4_free_copy_state(struct nfsd4_copy *copy)
{
	struct nfsd_net *nn;

	WARN_ON_ONCE(copy->cp_stateid.sc_type != NFS4_COPY_STID);
	nn = net_generic(copy->cp_clp->net, nfsd_net_id);
	spin_lock(&nn->s2s_cp_lock);
	idr_remove(&nn->s2s_cp_stateids,
		   copy->cp_stateid.stid.si_opaque.so_id);
	spin_unlock(&nn->s2s_cp_lock);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817)
/*
 * Release every COPY_NOTIFY state hanging off @stid, draining its
 * sc_cp_list under the per-net s2s_cp_lock.
 */
static void nfs4_free_cpntf_statelist(struct net *net, struct nfs4_stid *stid)
{
	struct nfs4_cpntf_state *cps;
	struct nfsd_net *nn;

	nn = net_generic(net, nfsd_net_id);
	spin_lock(&nn->s2s_cp_lock);
	while (!list_empty(&stid->sc_cp_list)) {
		cps = list_first_entry(&stid->sc_cp_list,
				       struct nfs4_cpntf_state, cp_list);
		_free_cpntf_state_locked(nn, cps);
	}
	spin_unlock(&nn->s2s_cp_lock);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) static struct nfs4_ol_stateid * nfs4_alloc_open_stateid(struct nfs4_client *clp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) struct nfs4_stid *stid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) stid = nfs4_alloc_stid(clp, stateid_slab, nfs4_free_ol_stateid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) if (!stid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) return openlockstateid(stid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843)
/*
 * Destructor for delegation stateids: return the memory to deleg_slab
 * and drop the global delegation count.
 */
static void nfs4_free_deleg(struct nfs4_stid *stid)
{
	kmem_cache_free(deleg_slab, stid);
	atomic_long_dec(&num_delegations);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) * When we recall a delegation, we should be careful not to hand it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) * out again straight away.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) * To ensure this we keep a pair of bloom filters ('new' and 'old')
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) * in which the filehandles of recalled delegations are "stored".
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) * If a filehandle appear in either filter, a delegation is blocked.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) * When a delegation is recalled, the filehandle is stored in the "new"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) * filter.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) * Every 30 seconds we swap the filters and clear the "new" one,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) * unless both are empty of course.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) * Each filter is 256 bits. We hash the filehandle to 32bit and use the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) * low 3 bytes as hash-table indices.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) * 'blocked_delegations_lock', which is always taken in block_delegations(),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) * is used to manage concurrent access. Testing does not need the lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) * except when swapping the two filters.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) */
static DEFINE_SPINLOCK(blocked_delegations_lock);
static struct bloom_pair {
	int entries, old_entries;	/* insertions recorded in each filter */
	time64_t swap_time;		/* when the filters were last swapped */
	int new; /* index into 'set' */
	DECLARE_BITMAP(set[2], 256);	/* the two 256-bit bloom filters */
} blocked_delegations;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875)
/*
 * Was a delegation for this filehandle recalled recently?  Also swaps
 * and clears the bloom filters when the 30-second window has expired
 * (see the comment above struct bloom_pair: tests are deliberately
 * done without the lock; only the swap takes it).
 */
static int delegation_blocked(struct knfsd_fh *fh)
{
	u32 hash;
	struct bloom_pair *bd = &blocked_delegations;

	/* Both filters empty: nothing can be blocked. */
	if (bd->entries == 0)
		return 0;
	if (ktime_get_seconds() - bd->swap_time > 30) {
		spin_lock(&blocked_delegations_lock);
		/* Re-check under the lock in case another CPU swapped first. */
		if (ktime_get_seconds() - bd->swap_time > 30) {
			bd->entries -= bd->old_entries;
			bd->old_entries = bd->entries;
			memset(bd->set[bd->new], 0,
			       sizeof(bd->set[0]));
			bd->new = 1-bd->new;
			bd->swap_time = ktime_get_seconds();
		}
		spin_unlock(&blocked_delegations_lock);
	}
	hash = jhash(&fh->fh_base, fh->fh_size, 0);
	/* Blocked only if all three hash-derived bits are set in a filter. */
	if (test_bit(hash&255, bd->set[0]) &&
	    test_bit((hash>>8)&255, bd->set[0]) &&
	    test_bit((hash>>16)&255, bd->set[0]))
		return 1;

	if (test_bit(hash&255, bd->set[1]) &&
	    test_bit((hash>>8)&255, bd->set[1]) &&
	    test_bit((hash>>16)&255, bd->set[1]))
		return 1;

	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908)
/*
 * Record @fh in the "new" bloom filter so delegation_blocked() will
 * refuse further delegations for it for a while.
 */
static void block_delegations(struct knfsd_fh *fh)
{
	u32 hash;
	struct bloom_pair *bd = &blocked_delegations;

	hash = jhash(&fh->fh_base, fh->fh_size, 0);

	spin_lock(&blocked_delegations_lock);
	__set_bit(hash&255, bd->set[bd->new]);
	__set_bit((hash>>8)&255, bd->set[bd->new]);
	__set_bit((hash>>16)&255, bd->set[bd->new]);
	/* Restart the swap clock when going from empty to non-empty. */
	if (bd->entries == 0)
		bd->swap_time = ktime_get_seconds();
	bd->entries += 1;
	spin_unlock(&blocked_delegations_lock);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) static struct nfs4_delegation *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) alloc_init_deleg(struct nfs4_client *clp, struct nfs4_file *fp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) struct svc_fh *current_fh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) struct nfs4_clnt_odstate *odstate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) struct nfs4_delegation *dp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) long n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) dprintk("NFSD alloc_init_deleg\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) n = atomic_long_inc_return(&num_delegations);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) if (n < 0 || n > max_delegations)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) goto out_dec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) if (delegation_blocked(¤t_fh->fh_handle))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) goto out_dec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) dp = delegstateid(nfs4_alloc_stid(clp, deleg_slab, nfs4_free_deleg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) if (dp == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) goto out_dec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) * delegation seqid's are never incremented. The 4.1 special
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) * meaning of seqid 0 isn't meaningful, really, but let's avoid
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) * 0 anyway just for consistency and use 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) dp->dl_stid.sc_stateid.si_generation = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) INIT_LIST_HEAD(&dp->dl_perfile);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) INIT_LIST_HEAD(&dp->dl_perclnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) INIT_LIST_HEAD(&dp->dl_recall_lru);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) dp->dl_clnt_odstate = odstate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) get_clnt_odstate(odstate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) dp->dl_type = NFS4_OPEN_DELEGATE_READ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) dp->dl_retries = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) nfsd4_init_cb(&dp->dl_recall, dp->dl_stid.sc_client,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) &nfsd4_cb_recall_ops, NFSPROC4_CLNT_CB_RECALL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) get_nfs4_file(fp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) dp->dl_stid.sc_file = fp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) return dp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) out_dec:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) atomic_long_dec(&num_delegations);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) nfs4_put_stid(struct nfs4_stid *s)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) struct nfs4_file *fp = s->sc_file;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) struct nfs4_client *clp = s->sc_client;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) might_lock(&clp->cl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) if (!refcount_dec_and_lock(&s->sc_count, &clp->cl_lock)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) wake_up_all(&close_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) idr_remove(&clp->cl_stateids, s->sc_stateid.si_opaque.so_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) nfs4_free_cpntf_statelist(clp->net, s);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) spin_unlock(&clp->cl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) s->sc_free(s);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) if (fp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) put_nfs4_file(fp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) nfs4_inc_and_copy_stateid(stateid_t *dst, struct nfs4_stid *stid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) stateid_t *src = &stid->sc_stateid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) spin_lock(&stid->sc_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) if (unlikely(++src->si_generation == 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) src->si_generation = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) memcpy(dst, src, sizeof(*dst));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) spin_unlock(&stid->sc_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) static void put_deleg_file(struct nfs4_file *fp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) struct nfsd_file *nf = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) spin_lock(&fp->fi_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) if (--fp->fi_delegees == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) swap(nf, fp->fi_deleg_file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) spin_unlock(&fp->fi_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) if (nf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) nfsd_file_put(nf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) static void nfs4_unlock_deleg_lease(struct nfs4_delegation *dp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) struct nfs4_file *fp = dp->dl_stid.sc_file;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) struct nfsd_file *nf = fp->fi_deleg_file;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) WARN_ON_ONCE(!fp->fi_delegees);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) vfs_setlease(nf->nf_file, F_UNLCK, NULL, (void **)&dp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) put_deleg_file(fp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) static void destroy_unhashed_deleg(struct nfs4_delegation *dp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) put_clnt_odstate(dp->dl_clnt_odstate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) nfs4_unlock_deleg_lease(dp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) nfs4_put_stid(&dp->dl_stid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) void nfs4_unhash_stid(struct nfs4_stid *s)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) s->sc_type = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) * nfs4_delegation_exists - Discover if this delegation already exists
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) * @clp: a pointer to the nfs4_client we're granting a delegation to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) * @fp: a pointer to the nfs4_file we're granting a delegation on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) * Return:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) * On success: true iff an existing delegation is found
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) static bool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) nfs4_delegation_exists(struct nfs4_client *clp, struct nfs4_file *fp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) struct nfs4_delegation *searchdp = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) struct nfs4_client *searchclp = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) lockdep_assert_held(&state_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) lockdep_assert_held(&fp->fi_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) list_for_each_entry(searchdp, &fp->fi_delegations, dl_perfile) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) searchclp = searchdp->dl_stid.sc_client;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) if (clp == searchclp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) * hash_delegation_locked - Add a delegation to the appropriate lists
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) * @dp: a pointer to the nfs4_delegation we are adding.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) * @fp: a pointer to the nfs4_file we're granting a delegation on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) * Return:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) * On success: NULL if the delegation was successfully hashed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) * On error: -EAGAIN if one was previously granted to this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) * nfs4_client for this nfs4_file. Delegation is not hashed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) hash_delegation_locked(struct nfs4_delegation *dp, struct nfs4_file *fp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) struct nfs4_client *clp = dp->dl_stid.sc_client;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) lockdep_assert_held(&state_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) lockdep_assert_held(&fp->fi_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) if (nfs4_delegation_exists(clp, fp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) refcount_inc(&dp->dl_stid.sc_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) dp->dl_stid.sc_type = NFS4_DELEG_STID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) list_add(&dp->dl_perfile, &fp->fi_delegations);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) list_add(&dp->dl_perclnt, &clp->cl_delegations);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) static bool delegation_hashed(struct nfs4_delegation *dp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) return !(list_empty(&dp->dl_perfile));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) static bool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) unhash_delegation_locked(struct nfs4_delegation *dp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) struct nfs4_file *fp = dp->dl_stid.sc_file;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) lockdep_assert_held(&state_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) if (!delegation_hashed(dp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) dp->dl_stid.sc_type = NFS4_CLOSED_DELEG_STID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) /* Ensure that deleg break won't try to requeue it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) ++dp->dl_time;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) spin_lock(&fp->fi_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) list_del_init(&dp->dl_perclnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) list_del_init(&dp->dl_recall_lru);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) list_del_init(&dp->dl_perfile);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) spin_unlock(&fp->fi_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) static void destroy_delegation(struct nfs4_delegation *dp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) bool unhashed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) spin_lock(&state_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) unhashed = unhash_delegation_locked(dp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) spin_unlock(&state_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) if (unhashed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) destroy_unhashed_deleg(dp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) static void revoke_delegation(struct nfs4_delegation *dp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) struct nfs4_client *clp = dp->dl_stid.sc_client;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) WARN_ON(!list_empty(&dp->dl_recall_lru));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) if (clp->cl_minorversion) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) dp->dl_stid.sc_type = NFS4_REVOKED_DELEG_STID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) refcount_inc(&dp->dl_stid.sc_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) spin_lock(&clp->cl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) list_add(&dp->dl_recall_lru, &clp->cl_revoked);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) spin_unlock(&clp->cl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) destroy_unhashed_deleg(dp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) * SETCLIENTID state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) static unsigned int clientid_hashval(u32 id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) return id & CLIENT_HASH_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) static unsigned int clientstr_hashval(struct xdr_netobj name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) return opaque_hashval(name.data, 8) & CLIENT_HASH_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) * We store the NONE, READ, WRITE, and BOTH bits separately in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) * st_{access,deny}_bmap field of the stateid, in order to track not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) * only what share bits are currently in force, but also what
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) * combinations of share bits previous opens have used. This allows us
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) * to enforce the recommendation of rfc 3530 14.2.19 that the server
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) * return an error if the client attempt to downgrade to a combination
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) * of share bits not explicable by closing some of its previous opens.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) * XXX: This enforcement is actually incomplete, since we don't keep
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) * track of access/deny bit combinations; so, e.g., we allow:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) * OPEN allow read, deny write
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) * OPEN allow both, deny none
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) * DOWNGRADE allow read, deny none
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) * which we should reject.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) static unsigned int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) bmap_to_share_mode(unsigned long bmap) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) unsigned int access = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) for (i = 1; i < 4; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) if (test_bit(i, &bmap))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) access |= i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) return access;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) /* set share access for a given stateid */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) static inline void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) set_access(u32 access, struct nfs4_ol_stateid *stp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) unsigned char mask = 1 << access;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) WARN_ON_ONCE(access > NFS4_SHARE_ACCESS_BOTH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) stp->st_access_bmap |= mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) /* clear share access for a given stateid */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) static inline void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) clear_access(u32 access, struct nfs4_ol_stateid *stp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) unsigned char mask = 1 << access;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) WARN_ON_ONCE(access > NFS4_SHARE_ACCESS_BOTH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) stp->st_access_bmap &= ~mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) /* test whether a given stateid has access */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) static inline bool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) test_access(u32 access, struct nfs4_ol_stateid *stp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) unsigned char mask = 1 << access;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) return (bool)(stp->st_access_bmap & mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) /* set share deny for a given stateid */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) static inline void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) set_deny(u32 deny, struct nfs4_ol_stateid *stp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) unsigned char mask = 1 << deny;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) WARN_ON_ONCE(deny > NFS4_SHARE_DENY_BOTH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) stp->st_deny_bmap |= mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) /* clear share deny for a given stateid */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) static inline void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) clear_deny(u32 deny, struct nfs4_ol_stateid *stp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) unsigned char mask = 1 << deny;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) WARN_ON_ONCE(deny > NFS4_SHARE_DENY_BOTH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) stp->st_deny_bmap &= ~mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) /* test whether a given stateid is denying specific access */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) static inline bool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) test_deny(u32 deny, struct nfs4_ol_stateid *stp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) unsigned char mask = 1 << deny;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) return (bool)(stp->st_deny_bmap & mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) static int nfs4_access_to_omode(u32 access)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) switch (access & NFS4_SHARE_ACCESS_BOTH) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) case NFS4_SHARE_ACCESS_READ:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) return O_RDONLY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) case NFS4_SHARE_ACCESS_WRITE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) return O_WRONLY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) case NFS4_SHARE_ACCESS_BOTH:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) return O_RDWR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) WARN_ON_ONCE(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) return O_RDONLY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) * A stateid that had a deny mode associated with it is being released
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) * or downgraded. Recalculate the deny mode on the file.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) recalculate_deny_mode(struct nfs4_file *fp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) struct nfs4_ol_stateid *stp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) spin_lock(&fp->fi_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) fp->fi_share_deny = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) list_for_each_entry(stp, &fp->fi_stateids, st_perfile)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) fp->fi_share_deny |= bmap_to_share_mode(stp->st_deny_bmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) spin_unlock(&fp->fi_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) reset_union_bmap_deny(u32 deny, struct nfs4_ol_stateid *stp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) bool change = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) for (i = 1; i < 4; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) if ((i & deny) != i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) change = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) clear_deny(i, stp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) /* Recalculate per-file deny mode if there was a change */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) if (change)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) recalculate_deny_mode(stp->st_stid.sc_file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) /* release all access and file references for a given stateid */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) release_all_access(struct nfs4_ol_stateid *stp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) struct nfs4_file *fp = stp->st_stid.sc_file;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) if (fp && stp->st_deny_bmap != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) recalculate_deny_mode(fp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) for (i = 1; i < 4; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) if (test_access(i, stp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) nfs4_file_put_access(stp->st_stid.sc_file, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) clear_access(i, stp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) static inline void nfs4_free_stateowner(struct nfs4_stateowner *sop)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) kfree(sop->so_owner.data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) sop->so_ops->so_free(sop);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) static void nfs4_put_stateowner(struct nfs4_stateowner *sop)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) struct nfs4_client *clp = sop->so_client;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) might_lock(&clp->cl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) if (!atomic_dec_and_lock(&sop->so_count, &clp->cl_lock))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) sop->so_ops->so_unhash(sop);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) spin_unlock(&clp->cl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) nfs4_free_stateowner(sop);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) static bool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) nfs4_ol_stateid_unhashed(const struct nfs4_ol_stateid *stp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) return list_empty(&stp->st_perfile);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) static bool unhash_ol_stateid(struct nfs4_ol_stateid *stp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) struct nfs4_file *fp = stp->st_stid.sc_file;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) lockdep_assert_held(&stp->st_stateowner->so_client->cl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) if (list_empty(&stp->st_perfile))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) spin_lock(&fp->fi_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) list_del_init(&stp->st_perfile);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) spin_unlock(&fp->fi_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) list_del(&stp->st_perstateowner);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) static void nfs4_free_ol_stateid(struct nfs4_stid *stid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) struct nfs4_ol_stateid *stp = openlockstateid(stid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) put_clnt_odstate(stp->st_clnt_odstate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) release_all_access(stp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) if (stp->st_stateowner)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) nfs4_put_stateowner(stp->st_stateowner);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) kmem_cache_free(stateid_slab, stid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) static void nfs4_free_lock_stateid(struct nfs4_stid *stid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) struct nfs4_ol_stateid *stp = openlockstateid(stid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) struct nfs4_lockowner *lo = lockowner(stp->st_stateowner);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) struct nfsd_file *nf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) nf = find_any_file(stp->st_stid.sc_file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) if (nf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) get_file(nf->nf_file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) filp_close(nf->nf_file, (fl_owner_t)lo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) nfsd_file_put(nf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) nfs4_free_ol_stateid(stid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) * Put the persistent reference to an already unhashed generic stateid, while
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) * holding the cl_lock. If it's the last reference, then put it onto the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) * reaplist for later destruction.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) static void put_ol_stateid_locked(struct nfs4_ol_stateid *stp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) struct list_head *reaplist)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) struct nfs4_stid *s = &stp->st_stid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) struct nfs4_client *clp = s->sc_client;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) lockdep_assert_held(&clp->cl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) WARN_ON_ONCE(!list_empty(&stp->st_locks));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) if (!refcount_dec_and_test(&s->sc_count)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) wake_up_all(&close_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) idr_remove(&clp->cl_stateids, s->sc_stateid.si_opaque.so_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) list_add(&stp->st_locks, reaplist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) static bool unhash_lock_stateid(struct nfs4_ol_stateid *stp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) lockdep_assert_held(&stp->st_stid.sc_client->cl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) if (!unhash_ol_stateid(stp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) list_del_init(&stp->st_locks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) nfs4_unhash_stid(&stp->st_stid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) static void release_lock_stateid(struct nfs4_ol_stateid *stp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) struct nfs4_client *clp = stp->st_stid.sc_client;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) bool unhashed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) spin_lock(&clp->cl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) unhashed = unhash_lock_stateid(stp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) spin_unlock(&clp->cl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) if (unhashed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) nfs4_put_stid(&stp->st_stid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) static void unhash_lockowner_locked(struct nfs4_lockowner *lo)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) struct nfs4_client *clp = lo->lo_owner.so_client;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) lockdep_assert_held(&clp->cl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) list_del_init(&lo->lo_owner.so_strhash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) * Free a list of generic stateids that were collected earlier after being
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) * fully unhashed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) free_ol_stateid_reaplist(struct list_head *reaplist)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) struct nfs4_ol_stateid *stp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) struct nfs4_file *fp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) might_sleep();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) while (!list_empty(reaplist)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) stp = list_first_entry(reaplist, struct nfs4_ol_stateid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) st_locks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) list_del(&stp->st_locks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) fp = stp->st_stid.sc_file;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) stp->st_stid.sc_free(&stp->st_stid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) if (fp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) put_nfs4_file(fp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) static void release_open_stateid_locks(struct nfs4_ol_stateid *open_stp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) struct list_head *reaplist)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) struct nfs4_ol_stateid *stp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) lockdep_assert_held(&open_stp->st_stid.sc_client->cl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) while (!list_empty(&open_stp->st_locks)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) stp = list_entry(open_stp->st_locks.next,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) struct nfs4_ol_stateid, st_locks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) WARN_ON(!unhash_lock_stateid(stp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) put_ol_stateid_locked(stp, reaplist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) static bool unhash_open_stateid(struct nfs4_ol_stateid *stp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) struct list_head *reaplist)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) lockdep_assert_held(&stp->st_stid.sc_client->cl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) if (!unhash_ol_stateid(stp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) release_open_stateid_locks(stp, reaplist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) static void release_open_stateid(struct nfs4_ol_stateid *stp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) LIST_HEAD(reaplist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) spin_lock(&stp->st_stid.sc_client->cl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) if (unhash_open_stateid(stp, &reaplist))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) put_ol_stateid_locked(stp, &reaplist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) spin_unlock(&stp->st_stid.sc_client->cl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) free_ol_stateid_reaplist(&reaplist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) static void unhash_openowner_locked(struct nfs4_openowner *oo)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) struct nfs4_client *clp = oo->oo_owner.so_client;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) lockdep_assert_held(&clp->cl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) list_del_init(&oo->oo_owner.so_strhash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) list_del_init(&oo->oo_perclient);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) static void release_last_closed_stateid(struct nfs4_openowner *oo)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) struct nfsd_net *nn = net_generic(oo->oo_owner.so_client->net,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) nfsd_net_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) struct nfs4_ol_stateid *s;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) spin_lock(&nn->client_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) s = oo->oo_last_closed_stid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) if (s) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) list_del_init(&oo->oo_close_lru);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) oo->oo_last_closed_stid = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) spin_unlock(&nn->client_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) if (s)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) nfs4_put_stid(&s->st_stid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) static void release_openowner(struct nfs4_openowner *oo)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) struct nfs4_ol_stateid *stp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) struct nfs4_client *clp = oo->oo_owner.so_client;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) struct list_head reaplist;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) INIT_LIST_HEAD(&reaplist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) spin_lock(&clp->cl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) unhash_openowner_locked(oo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) while (!list_empty(&oo->oo_owner.so_stateids)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) stp = list_first_entry(&oo->oo_owner.so_stateids,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) struct nfs4_ol_stateid, st_perstateowner);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) if (unhash_open_stateid(stp, &reaplist))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) put_ol_stateid_locked(stp, &reaplist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) spin_unlock(&clp->cl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) free_ol_stateid_reaplist(&reaplist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) release_last_closed_stateid(oo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) nfs4_put_stateowner(&oo->oo_owner);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) static inline int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) hash_sessionid(struct nfs4_sessionid *sessionid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) struct nfsd4_sessionid *sid = (struct nfsd4_sessionid *)sessionid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) return sid->sequence % SESSION_HASH_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) #ifdef CONFIG_SUNRPC_DEBUG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) static inline void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) u32 *ptr = (u32 *)(&sessionid->data[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) dprintk("%s: %u:%u:%u:%u\n", fn, ptr[0], ptr[1], ptr[2], ptr[3]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) static inline void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) * Bump the seqid on cstate->replay_owner, and clear replay_owner if it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) * won't be used for replay.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) void nfsd4_bump_seqid(struct nfsd4_compound_state *cstate, __be32 nfserr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) struct nfs4_stateowner *so = cstate->replay_owner;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) if (nfserr == nfserr_replay_me)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) if (!seqid_mutating_err(ntohl(nfserr))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) nfsd4_cstate_clear_replay(cstate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) if (!so)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) if (so->so_is_open_owner)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) release_last_closed_stateid(openowner(so));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) so->so_seqid++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) gen_sessionid(struct nfsd4_session *ses)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) struct nfs4_client *clp = ses->se_client;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) struct nfsd4_sessionid *sid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) sid = (struct nfsd4_sessionid *)ses->se_sessionid.data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) sid->clientid = clp->cl_clientid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) sid->sequence = current_sessionid++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) sid->reserved = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) * The protocol defines ca_maxresponssize_cached to include the size of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) * the rpc header, but all we need to cache is the data starting after
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) * the end of the initial SEQUENCE operation--the rest we regenerate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) * each time. Therefore we can advertise a ca_maxresponssize_cached
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) * value that is the number of bytes in our cache plus a few additional
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) * bytes. In order to stay on the safe side, and not promise more than
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) * we can cache, those additional bytes must be the minimum possible: 24
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) * bytes of rpc header (xid through accept state, with AUTH_NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) * verifier), 12 for the compound header (with zero-length tag), and 44
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) * for the SEQUENCE op response:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) #define NFSD_MIN_HDR_SEQ_SZ (24 + 12 + 44)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) free_session_slots(struct nfsd4_session *ses)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) for (i = 0; i < ses->se_fchannel.maxreqs; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) free_svc_cred(&ses->se_slots[i]->sl_cred);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) kfree(ses->se_slots[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) * We don't actually need to cache the rpc and session headers, so we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) * can allocate a little less for each slot:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) static inline u32 slot_bytes(struct nfsd4_channel_attrs *ca)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) u32 size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) if (ca->maxresp_cached < NFSD_MIN_HDR_SEQ_SZ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) size = ca->maxresp_cached - NFSD_MIN_HDR_SEQ_SZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) return size + sizeof(struct nfsd4_slot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) * XXX: If we run out of reserved DRC memory we could (up to a point)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) * re-negotiate active sessions and reduce their slot usage to make
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) * room for new connections. For now we just fail the create session.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) static u32 nfsd4_get_drc_mem(struct nfsd4_channel_attrs *ca, struct nfsd_net *nn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) u32 slotsize = slot_bytes(ca);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) u32 num = ca->maxreqs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) unsigned long avail, total_avail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) unsigned int scale_factor;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) spin_lock(&nfsd_drc_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) if (nfsd_drc_max_mem > nfsd_drc_mem_used)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) total_avail = nfsd_drc_max_mem - nfsd_drc_mem_used;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) /* We have handed out more space than we chose in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) * set_max_drc() to allow. That isn't really a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) * problem as long as that doesn't make us think we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) * have lots more due to integer overflow.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) total_avail = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) avail = min((unsigned long)NFSD_MAX_MEM_PER_SESSION, total_avail);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) * Never use more than a fraction of the remaining memory,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) * unless it's the only way to give this client a slot.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) * The chosen fraction is either 1/8 or 1/number of threads,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) * whichever is smaller. This ensures there are adequate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) * slots to support multiple clients per thread.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) * Give the client one slot even if that would require
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) * over-allocation--it is better than failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) scale_factor = max_t(unsigned int, 8, nn->nfsd_serv->sv_nrthreads);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) avail = clamp_t(unsigned long, avail, slotsize,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) total_avail/scale_factor);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) num = min_t(int, num, avail / slotsize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) num = max_t(int, num, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) nfsd_drc_mem_used += num * slotsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) spin_unlock(&nfsd_drc_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) return num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) static void nfsd4_put_drc_mem(struct nfsd4_channel_attrs *ca)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) int slotsize = slot_bytes(ca);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) spin_lock(&nfsd_drc_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) nfsd_drc_mem_used -= slotsize * ca->maxreqs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) spin_unlock(&nfsd_drc_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) static struct nfsd4_session *alloc_session(struct nfsd4_channel_attrs *fattrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) struct nfsd4_channel_attrs *battrs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) int numslots = fattrs->maxreqs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) int slotsize = slot_bytes(fattrs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) struct nfsd4_session *new;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) int mem, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) BUILD_BUG_ON(NFSD_MAX_SLOTS_PER_SESSION * sizeof(struct nfsd4_slot *)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) + sizeof(struct nfsd4_session) > PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) mem = numslots * sizeof(struct nfsd4_slot *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) new = kzalloc(sizeof(*new) + mem, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) if (!new)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) /* allocate each struct nfsd4_slot and data cache in one piece */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) for (i = 0; i < numslots; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) new->se_slots[i] = kzalloc(slotsize, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) if (!new->se_slots[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) goto out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) memcpy(&new->se_fchannel, fattrs, sizeof(struct nfsd4_channel_attrs));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) memcpy(&new->se_bchannel, battrs, sizeof(struct nfsd4_channel_attrs));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) return new;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) out_free:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) while (i--)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) kfree(new->se_slots[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) kfree(new);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) static void free_conn(struct nfsd4_conn *c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) svc_xprt_put(c->cn_xprt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) kfree(c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) static void nfsd4_conn_lost(struct svc_xpt_user *u)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) struct nfsd4_conn *c = container_of(u, struct nfsd4_conn, cn_xpt_user);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) struct nfs4_client *clp = c->cn_session->se_client;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) spin_lock(&clp->cl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) if (!list_empty(&c->cn_persession)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) list_del(&c->cn_persession);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) free_conn(c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) nfsd4_probe_callback(clp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) spin_unlock(&clp->cl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) static struct nfsd4_conn *alloc_conn(struct svc_rqst *rqstp, u32 flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) struct nfsd4_conn *conn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) conn = kmalloc(sizeof(struct nfsd4_conn), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) if (!conn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) svc_xprt_get(rqstp->rq_xprt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) conn->cn_xprt = rqstp->rq_xprt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) conn->cn_flags = flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) INIT_LIST_HEAD(&conn->cn_xpt_user.list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) return conn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) static void __nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) conn->cn_session = ses;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) list_add(&conn->cn_persession, &ses->se_conns);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) static void nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) struct nfs4_client *clp = ses->se_client;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) spin_lock(&clp->cl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) __nfsd4_hash_conn(conn, ses);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) spin_unlock(&clp->cl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) static int nfsd4_register_conn(struct nfsd4_conn *conn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) conn->cn_xpt_user.callback = nfsd4_conn_lost;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) return register_xpt_user(conn->cn_xprt, &conn->cn_xpt_user);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) static void nfsd4_init_conn(struct svc_rqst *rqstp, struct nfsd4_conn *conn, struct nfsd4_session *ses)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) nfsd4_hash_conn(conn, ses);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) ret = nfsd4_register_conn(conn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) /* oops; xprt is already down: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) nfsd4_conn_lost(&conn->cn_xpt_user);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) /* We may have gained or lost a callback channel: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) nfsd4_probe_callback_sync(ses->se_client);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) static struct nfsd4_conn *alloc_conn_from_crses(struct svc_rqst *rqstp, struct nfsd4_create_session *cses)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) u32 dir = NFS4_CDFC4_FORE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) if (cses->flags & SESSION4_BACK_CHAN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) dir |= NFS4_CDFC4_BACK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) return alloc_conn(rqstp, dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) /* must be called under client_lock */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) static void nfsd4_del_conns(struct nfsd4_session *s)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) struct nfs4_client *clp = s->se_client;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) struct nfsd4_conn *c;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) spin_lock(&clp->cl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) while (!list_empty(&s->se_conns)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) c = list_first_entry(&s->se_conns, struct nfsd4_conn, cn_persession);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) list_del_init(&c->cn_persession);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) spin_unlock(&clp->cl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) unregister_xpt_user(c->cn_xprt, &c->cn_xpt_user);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) free_conn(c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) spin_lock(&clp->cl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) spin_unlock(&clp->cl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) static void __free_session(struct nfsd4_session *ses)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) free_session_slots(ses);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) kfree(ses);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) static void free_session(struct nfsd4_session *ses)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) nfsd4_del_conns(ses);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) nfsd4_put_drc_mem(&ses->se_fchannel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) __free_session(ses);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) static void init_session(struct svc_rqst *rqstp, struct nfsd4_session *new, struct nfs4_client *clp, struct nfsd4_create_session *cses)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) int idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) new->se_client = clp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) gen_sessionid(new);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) INIT_LIST_HEAD(&new->se_conns);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) new->se_cb_seq_nr = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) new->se_flags = cses->flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) new->se_cb_prog = cses->callback_prog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) new->se_cb_sec = cses->cb_sec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) atomic_set(&new->se_ref, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) idx = hash_sessionid(&new->se_sessionid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) list_add(&new->se_hash, &nn->sessionid_hashtbl[idx]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) spin_lock(&clp->cl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) list_add(&new->se_perclnt, &clp->cl_sessions);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) spin_unlock(&clp->cl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) struct sockaddr *sa = svc_addr(rqstp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) * This is a little silly; with sessions there's no real
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) * use for the callback address. Use the peer address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) * as a reasonable default for now, but consider fixing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) * the rpc client not to require an address in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) * future:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) rpc_copy_addr((struct sockaddr *)&clp->cl_cb_conn.cb_addr, sa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) clp->cl_cb_conn.cb_addrlen = svc_addr_len(sa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) /* caller must hold client_lock */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) static struct nfsd4_session *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) __find_in_sessionid_hashtbl(struct nfs4_sessionid *sessionid, struct net *net)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) struct nfsd4_session *elem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) int idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) struct nfsd_net *nn = net_generic(net, nfsd_net_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) lockdep_assert_held(&nn->client_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) dump_sessionid(__func__, sessionid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) idx = hash_sessionid(sessionid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) /* Search in the appropriate list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) list_for_each_entry(elem, &nn->sessionid_hashtbl[idx], se_hash) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) if (!memcmp(elem->se_sessionid.data, sessionid->data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) NFS4_MAX_SESSIONID_LEN)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) return elem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) dprintk("%s: session not found\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) static struct nfsd4_session *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) find_in_sessionid_hashtbl(struct nfs4_sessionid *sessionid, struct net *net,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) __be32 *ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) struct nfsd4_session *session;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) __be32 status = nfserr_badsession;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) session = __find_in_sessionid_hashtbl(sessionid, net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) if (!session)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) status = nfsd4_get_session_locked(session);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) if (status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) session = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) *ret = status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) return session;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) /* caller must hold client_lock */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) unhash_session(struct nfsd4_session *ses)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) struct nfs4_client *clp = ses->se_client;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) lockdep_assert_held(&nn->client_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) list_del(&ses->se_hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) spin_lock(&ses->se_client->cl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) list_del(&ses->se_perclnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) spin_unlock(&ses->se_client->cl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) /* SETCLIENTID and SETCLIENTID_CONFIRM Helper functions */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) STALE_CLIENTID(clientid_t *clid, struct nfsd_net *nn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) * We're assuming the clid was not given out from a boot
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) * precisely 2^32 (about 136 years) before this one. That seems
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) * a safe assumption:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) if (clid->cl_boot == (u32)nn->boot_time)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) trace_nfsd_clid_stale(clid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) * XXX Should we use a slab cache ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) * This type of memory management is somewhat inefficient, but we use it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) * anyway since SETCLIENTID is not a common operation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) static struct nfs4_client *alloc_client(struct xdr_netobj name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) struct nfs4_client *clp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) clp = kmem_cache_zalloc(client_slab, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) if (clp == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) xdr_netobj_dup(&clp->cl_name, &name, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) if (clp->cl_name.data == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) goto err_no_name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) clp->cl_ownerstr_hashtbl = kmalloc_array(OWNER_HASH_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) sizeof(struct list_head),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) if (!clp->cl_ownerstr_hashtbl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) goto err_no_hashtbl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) for (i = 0; i < OWNER_HASH_SIZE; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) INIT_LIST_HEAD(&clp->cl_ownerstr_hashtbl[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) INIT_LIST_HEAD(&clp->cl_sessions);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) idr_init(&clp->cl_stateids);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) atomic_set(&clp->cl_rpc_users, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) clp->cl_cb_state = NFSD4_CB_UNKNOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) INIT_LIST_HEAD(&clp->cl_idhash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) INIT_LIST_HEAD(&clp->cl_openowners);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) INIT_LIST_HEAD(&clp->cl_delegations);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) INIT_LIST_HEAD(&clp->cl_lru);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) INIT_LIST_HEAD(&clp->cl_revoked);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) #ifdef CONFIG_NFSD_PNFS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) INIT_LIST_HEAD(&clp->cl_lo_states);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) INIT_LIST_HEAD(&clp->async_copies);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) spin_lock_init(&clp->async_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) spin_lock_init(&clp->cl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) rpc_init_wait_queue(&clp->cl_cb_waitq, "Backchannel slot table");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) return clp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) err_no_hashtbl:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) kfree(clp->cl_name.data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) err_no_name:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) kmem_cache_free(client_slab, clp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) static void __free_client(struct kref *k)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) struct nfsdfs_client *c = container_of(k, struct nfsdfs_client, cl_ref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) struct nfs4_client *clp = container_of(c, struct nfs4_client, cl_nfsdfs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) free_svc_cred(&clp->cl_cred);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) kfree(clp->cl_ownerstr_hashtbl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) kfree(clp->cl_name.data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) kfree(clp->cl_nii_domain.data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) kfree(clp->cl_nii_name.data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) idr_destroy(&clp->cl_stateids);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) kmem_cache_free(client_slab, clp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) static void drop_client(struct nfs4_client *clp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) kref_put(&clp->cl_nfsdfs.cl_ref, __free_client);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) free_client(struct nfs4_client *clp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) while (!list_empty(&clp->cl_sessions)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) struct nfsd4_session *ses;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) ses = list_entry(clp->cl_sessions.next, struct nfsd4_session,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) se_perclnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) list_del(&ses->se_perclnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) WARN_ON_ONCE(atomic_read(&ses->se_ref));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) free_session(ses);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) rpc_destroy_wait_queue(&clp->cl_cb_waitq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) if (clp->cl_nfsd_dentry) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) nfsd_client_rmdir(clp->cl_nfsd_dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) clp->cl_nfsd_dentry = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) wake_up_all(&expiry_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) drop_client(clp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) /* must be called under the client_lock */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) unhash_client_locked(struct nfs4_client *clp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) struct nfsd4_session *ses;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) lockdep_assert_held(&nn->client_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) /* Mark the client as expired! */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) clp->cl_time = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) /* Make it invisible */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) if (!list_empty(&clp->cl_idhash)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) list_del_init(&clp->cl_idhash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) if (test_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) rb_erase(&clp->cl_namenode, &nn->conf_name_tree);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) rb_erase(&clp->cl_namenode, &nn->unconf_name_tree);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) list_del_init(&clp->cl_lru);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) spin_lock(&clp->cl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) list_for_each_entry(ses, &clp->cl_sessions, se_perclnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) list_del_init(&ses->se_hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) spin_unlock(&clp->cl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) unhash_client(struct nfs4_client *clp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) spin_lock(&nn->client_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) unhash_client_locked(clp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) spin_unlock(&nn->client_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) static __be32 mark_client_expired_locked(struct nfs4_client *clp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) if (atomic_read(&clp->cl_rpc_users))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) return nfserr_jukebox;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) unhash_client_locked(clp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) return nfs_ok;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) __destroy_client(struct nfs4_client *clp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) struct nfs4_openowner *oo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) struct nfs4_delegation *dp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) struct list_head reaplist;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) INIT_LIST_HEAD(&reaplist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) spin_lock(&state_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) while (!list_empty(&clp->cl_delegations)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) dp = list_entry(clp->cl_delegations.next, struct nfs4_delegation, dl_perclnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) WARN_ON(!unhash_delegation_locked(dp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) list_add(&dp->dl_recall_lru, &reaplist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) spin_unlock(&state_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) while (!list_empty(&reaplist)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) dp = list_entry(reaplist.next, struct nfs4_delegation, dl_recall_lru);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) list_del_init(&dp->dl_recall_lru);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) destroy_unhashed_deleg(dp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) while (!list_empty(&clp->cl_revoked)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) dp = list_entry(clp->cl_revoked.next, struct nfs4_delegation, dl_recall_lru);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) list_del_init(&dp->dl_recall_lru);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) nfs4_put_stid(&dp->dl_stid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) while (!list_empty(&clp->cl_openowners)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) oo = list_entry(clp->cl_openowners.next, struct nfs4_openowner, oo_perclient);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) nfs4_get_stateowner(&oo->oo_owner);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) release_openowner(oo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) for (i = 0; i < OWNER_HASH_SIZE; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) struct nfs4_stateowner *so, *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) list_for_each_entry_safe(so, tmp, &clp->cl_ownerstr_hashtbl[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) so_strhash) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) /* Should be no openowners at this point */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) WARN_ON_ONCE(so->so_is_open_owner);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) remove_blocked_locks(lockowner(so));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) nfsd4_return_all_client_layouts(clp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) nfsd4_shutdown_copy(clp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) nfsd4_shutdown_callback(clp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) if (clp->cl_cb_conn.cb_xprt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) svc_xprt_put(clp->cl_cb_conn.cb_xprt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) free_client(clp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) wake_up_all(&expiry_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) destroy_client(struct nfs4_client *clp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) unhash_client(clp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) __destroy_client(clp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) static void inc_reclaim_complete(struct nfs4_client *clp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) if (!nn->track_reclaim_completes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) if (!nfsd4_find_reclaim_client(clp->cl_name, nn))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) if (atomic_inc_return(&nn->nr_reclaim_complete) ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) nn->reclaim_str_hashtbl_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) printk(KERN_INFO "NFSD: all clients done reclaiming, ending NFSv4 grace period (net %x)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) clp->net->ns.inum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) nfsd4_end_grace(nn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) static void expire_client(struct nfs4_client *clp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) unhash_client(clp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) nfsd4_client_record_remove(clp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) __destroy_client(clp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) static void copy_verf(struct nfs4_client *target, nfs4_verifier *source)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) memcpy(target->cl_verifier.data, source->data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) sizeof(target->cl_verifier.data));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) static void copy_clid(struct nfs4_client *target, struct nfs4_client *source)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) target->cl_clientid.cl_boot = source->cl_clientid.cl_boot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) target->cl_clientid.cl_id = source->cl_clientid.cl_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) static int copy_cred(struct svc_cred *target, struct svc_cred *source)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) target->cr_principal = kstrdup(source->cr_principal, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) target->cr_raw_principal = kstrdup(source->cr_raw_principal,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) target->cr_targ_princ = kstrdup(source->cr_targ_princ, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) if ((source->cr_principal && !target->cr_principal) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) (source->cr_raw_principal && !target->cr_raw_principal) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) (source->cr_targ_princ && !target->cr_targ_princ))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) target->cr_flavor = source->cr_flavor;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) target->cr_uid = source->cr_uid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) target->cr_gid = source->cr_gid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) target->cr_group_info = source->cr_group_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) get_group_info(target->cr_group_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) target->cr_gss_mech = source->cr_gss_mech;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) if (source->cr_gss_mech)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) gss_mech_get(source->cr_gss_mech);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) compare_blob(const struct xdr_netobj *o1, const struct xdr_netobj *o2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) if (o1->len < o2->len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) if (o1->len > o2->len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) return memcmp(o1->data, o2->data, o1->len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) same_verf(nfs4_verifier *v1, nfs4_verifier *v2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) return 0 == memcmp(v1->data, v2->data, sizeof(v1->data));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) same_clid(clientid_t *cl1, clientid_t *cl2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) return (cl1->cl_boot == cl2->cl_boot) && (cl1->cl_id == cl2->cl_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) static bool groups_equal(struct group_info *g1, struct group_info *g2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) if (g1->ngroups != g2->ngroups)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) for (i=0; i<g1->ngroups; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) if (!gid_eq(g1->gid[i], g2->gid[i]))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) * RFC 3530 language requires clid_inuse be returned when the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) * "principal" associated with a requests differs from that previously
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) * used. We use uid, gid's, and gss principal string as our best
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) * approximation. We also don't want to allow non-gss use of a client
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) * established using gss: in theory cr_principal should catch that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) * change, but in practice cr_principal can be null even in the gss case
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) * since gssd doesn't always pass down a principal string.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) static bool is_gss_cred(struct svc_cred *cr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) /* Is cr_flavor one of the gss "pseudoflavors"?: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) return (cr->cr_flavor > RPC_AUTH_MAXFLAVOR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) static bool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) same_creds(struct svc_cred *cr1, struct svc_cred *cr2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) if ((is_gss_cred(cr1) != is_gss_cred(cr2))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) || (!uid_eq(cr1->cr_uid, cr2->cr_uid))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) || (!gid_eq(cr1->cr_gid, cr2->cr_gid))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) || !groups_equal(cr1->cr_group_info, cr2->cr_group_info))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) /* XXX: check that cr_targ_princ fields match ? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) if (cr1->cr_principal == cr2->cr_principal)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) if (!cr1->cr_principal || !cr2->cr_principal)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) return 0 == strcmp(cr1->cr_principal, cr2->cr_principal);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) static bool svc_rqst_integrity_protected(struct svc_rqst *rqstp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) struct svc_cred *cr = &rqstp->rq_cred;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) u32 service;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) if (!cr->cr_gss_mech)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) service = gss_pseudoflavor_to_service(cr->cr_gss_mech, cr->cr_flavor);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) return service == RPC_GSS_SVC_INTEGRITY ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) service == RPC_GSS_SVC_PRIVACY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) bool nfsd4_mach_creds_match(struct nfs4_client *cl, struct svc_rqst *rqstp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) struct svc_cred *cr = &rqstp->rq_cred;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) if (!cl->cl_mach_cred)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) if (cl->cl_cred.cr_gss_mech != cr->cr_gss_mech)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) if (!svc_rqst_integrity_protected(rqstp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) if (cl->cl_cred.cr_raw_principal)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) return 0 == strcmp(cl->cl_cred.cr_raw_principal,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) cr->cr_raw_principal);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) if (!cr->cr_principal)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) return 0 == strcmp(cl->cl_cred.cr_principal, cr->cr_principal);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) static void gen_confirm(struct nfs4_client *clp, struct nfsd_net *nn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) __be32 verf[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) * This is opaque to client, so no need to byte-swap. Use
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) * __force to keep sparse happy
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) verf[0] = (__force __be32)(u32)ktime_get_real_seconds();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) verf[1] = (__force __be32)nn->clverifier_counter++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) memcpy(clp->cl_confirm.data, verf, sizeof(clp->cl_confirm.data));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) static void gen_clid(struct nfs4_client *clp, struct nfsd_net *nn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) clp->cl_clientid.cl_boot = (u32)nn->boot_time;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) clp->cl_clientid.cl_id = nn->clientid_counter++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) gen_confirm(clp, nn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) static struct nfs4_stid *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) find_stateid_locked(struct nfs4_client *cl, stateid_t *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) struct nfs4_stid *ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) ret = idr_find(&cl->cl_stateids, t->si_opaque.so_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) if (!ret || !ret->sc_type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) static struct nfs4_stid *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) find_stateid_by_type(struct nfs4_client *cl, stateid_t *t, char typemask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) struct nfs4_stid *s;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) spin_lock(&cl->cl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) s = find_stateid_locked(cl, t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) if (s != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) if (typemask & s->sc_type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) refcount_inc(&s->sc_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) s = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) spin_unlock(&cl->cl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) return s;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) static struct nfs4_client *get_nfsdfs_clp(struct inode *inode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) struct nfsdfs_client *nc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) nc = get_nfsdfs_client(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) if (!nc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) return container_of(nc, struct nfs4_client, cl_nfsdfs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340)
/* Emit @len bytes of @data as a double-quoted, ascii-escaped string. */
static void seq_quote_mem(struct seq_file *m, char *data, int len)
{
	seq_putc(m, '"');
	seq_escape_mem_ascii(m, data, len);
	seq_putc(m, '"');
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) static int client_info_show(struct seq_file *m, void *v)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) struct inode *inode = m->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) struct nfs4_client *clp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) u64 clid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) clp = get_nfsdfs_clp(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) if (!clp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) memcpy(&clid, &clp->cl_clientid, sizeof(clid));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) seq_printf(m, "clientid: 0x%llx\n", clid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) seq_printf(m, "address: \"%pISpc\"\n", (struct sockaddr *)&clp->cl_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) seq_printf(m, "name: ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) seq_quote_mem(m, clp->cl_name.data, clp->cl_name.len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) seq_printf(m, "\nminor version: %d\n", clp->cl_minorversion);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) if (clp->cl_nii_domain.data) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) seq_printf(m, "Implementation domain: ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) seq_quote_mem(m, clp->cl_nii_domain.data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) clp->cl_nii_domain.len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) seq_printf(m, "\nImplementation name: ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) seq_quote_mem(m, clp->cl_nii_name.data, clp->cl_nii_name.len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) seq_printf(m, "\nImplementation time: [%lld, %ld]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) clp->cl_nii_time.tv_sec, clp->cl_nii_time.tv_nsec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) drop_client(clp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376)
/*
 * Open handler for the per-client "info" file; passes the inode through
 * so client_info_show() can recover the client from it.
 */
static int client_info_open(struct inode *inode, struct file *file)
{
	return single_open(file, client_info_show, inode);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381)
/* Read-only seq_file interface for the per-client "info" file. */
static const struct file_operations client_info_fops = {
	.open = client_info_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) static void *states_start(struct seq_file *s, loff_t *pos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) __acquires(&clp->cl_lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) struct nfs4_client *clp = s->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) unsigned long id = *pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) void *ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) spin_lock(&clp->cl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) ret = idr_get_next_ul(&clp->cl_stateids, &id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) *pos = id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) static void *states_next(struct seq_file *s, void *v, loff_t *pos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) struct nfs4_client *clp = s->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) unsigned long id = *pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) void *ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) id = *pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) id++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) ret = idr_get_next_ul(&clp->cl_stateids, &id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) *pos = id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414)
/* End of a states iteration: drop the lock taken by states_start(). */
static void states_stop(struct seq_file *s, void *v)
	__releases(&clp->cl_lock)
{
	struct nfs4_client *clp = s->private;

	spin_unlock(&clp->cl_lock);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422)
/* Print the file's name (last two path components, via %pD2). */
static void nfs4_show_fname(struct seq_file *s, struct nfsd_file *f)
{
	seq_printf(s, "filename: \"%pD2\"", f->nf_file);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) static void nfs4_show_superblock(struct seq_file *s, struct nfsd_file *f)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) struct inode *inode = f->nf_inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) seq_printf(s, "superblock: \"%02x:%02x:%ld\"",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) MAJOR(inode->i_sb->s_dev),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) MINOR(inode->i_sb->s_dev),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) inode->i_ino);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) static void nfs4_show_owner(struct seq_file *s, struct nfs4_stateowner *oo)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) seq_printf(s, "owner: ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) seq_quote_mem(s, oo->so_owner.data, oo->so_owner.len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) static void nfs4_show_stateid(struct seq_file *s, stateid_t *stid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) seq_printf(s, "0x%.8x", stid->si_generation);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) seq_printf(s, "%12phN", &stid->si_opaque);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) static int nfs4_show_open(struct seq_file *s, struct nfs4_stid *st)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) struct nfs4_ol_stateid *ols;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) struct nfs4_file *nf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) struct nfsd_file *file;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) struct nfs4_stateowner *oo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) unsigned int access, deny;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) if (st->sc_type != NFS4_OPEN_STID && st->sc_type != NFS4_LOCK_STID)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) return 0; /* XXX: or SEQ_SKIP? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) ols = openlockstateid(st);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) oo = ols->st_stateowner;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) nf = st->sc_file;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) file = find_any_file(nf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) if (!file)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) seq_printf(s, "- ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) nfs4_show_stateid(s, &st->sc_stateid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) seq_printf(s, ": { type: open, ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) access = bmap_to_share_mode(ols->st_access_bmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) deny = bmap_to_share_mode(ols->st_deny_bmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) seq_printf(s, "access: %s%s, ",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) access & NFS4_SHARE_ACCESS_READ ? "r" : "-",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) access & NFS4_SHARE_ACCESS_WRITE ? "w" : "-");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) seq_printf(s, "deny: %s%s, ",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) deny & NFS4_SHARE_ACCESS_READ ? "r" : "-",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) deny & NFS4_SHARE_ACCESS_WRITE ? "w" : "-");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) nfs4_show_superblock(s, file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) seq_printf(s, ", ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483) nfs4_show_fname(s, file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) seq_printf(s, ", ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) nfs4_show_owner(s, oo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) seq_printf(s, " }\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) nfsd_file_put(file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) static int nfs4_show_lock(struct seq_file *s, struct nfs4_stid *st)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) struct nfs4_ol_stateid *ols;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) struct nfs4_file *nf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) struct nfsd_file *file;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) struct nfs4_stateowner *oo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) ols = openlockstateid(st);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) oo = ols->st_stateowner;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) nf = st->sc_file;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) file = find_any_file(nf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) if (!file)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) seq_printf(s, "- ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) nfs4_show_stateid(s, &st->sc_stateid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508) seq_printf(s, ": { type: lock, ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) * Note: a lock stateid isn't really the same thing as a lock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) * it's the locking state held by one owner on a file, and there
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) * may be multiple (or no) lock ranges associated with it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) * (Same for the matter is true of open stateids.)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) nfs4_show_superblock(s, file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) /* XXX: open stateid? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) seq_printf(s, ", ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) nfs4_show_fname(s, file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) seq_printf(s, ", ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) nfs4_show_owner(s, oo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) seq_printf(s, " }\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524) nfsd_file_put(file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) static int nfs4_show_deleg(struct seq_file *s, struct nfs4_stid *st)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) struct nfs4_delegation *ds;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532) struct nfs4_file *nf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) struct nfsd_file *file;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) ds = delegstateid(st);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) nf = st->sc_file;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) file = find_deleg_file(nf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538) if (!file)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) seq_printf(s, "- ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) nfs4_show_stateid(s, &st->sc_stateid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) seq_printf(s, ": { type: deleg, ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545) /* Kinda dead code as long as we only support read delegs: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) seq_printf(s, "access: %s, ",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) ds->dl_type == NFS4_OPEN_DELEGATE_READ ? "r" : "w");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) /* XXX: lease time, whether it's being recalled. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) nfs4_show_superblock(s, file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) seq_printf(s, ", ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) nfs4_show_fname(s, file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) seq_printf(s, " }\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) nfsd_file_put(file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) static int nfs4_show_layout(struct seq_file *s, struct nfs4_stid *st)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) struct nfs4_layout_stateid *ls;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) struct nfsd_file *file;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) ls = container_of(st, struct nfs4_layout_stateid, ls_stid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566) file = ls->ls_file;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) seq_printf(s, "- ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) nfs4_show_stateid(s, &st->sc_stateid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570) seq_printf(s, ": { type: layout, ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572) /* XXX: What else would be useful? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) nfs4_show_superblock(s, file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575) seq_printf(s, ", ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) nfs4_show_fname(s, file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577) seq_printf(s, " }\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581)
/*
 * seq_file ->show for the per-client "states" file: dispatch on the
 * stateid type to the matching formatter; unknown types print nothing.
 */
static int states_show(struct seq_file *s, void *v)
{
	struct nfs4_stid *st = v;

	switch (st->sc_type) {
	case NFS4_OPEN_STID:
		return nfs4_show_open(s, st);
	case NFS4_LOCK_STID:
		return nfs4_show_lock(s, st);
	case NFS4_DELEG_STID:
		return nfs4_show_deleg(s, st);
	case NFS4_LAYOUT_STID:
		return nfs4_show_layout(s, st);
	default:
		return 0; /* XXX: or SEQ_SKIP? */
	}
	/* XXX: copy stateids? */
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601) static struct seq_operations states_seq_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602) .start = states_start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) .next = states_next,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604) .stop = states_stop,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) .show = states_show
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608) static int client_states_open(struct inode *inode, struct file *file)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610) struct seq_file *s;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611) struct nfs4_client *clp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614) clp = get_nfsdfs_clp(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615) if (!clp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618) ret = seq_open(file, &states_seq_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621) s = file->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622) s->private = clp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626) static int client_opens_release(struct inode *inode, struct file *file)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628) struct seq_file *m = file->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629) struct nfs4_client *clp = m->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631) /* XXX: alternatively, we could get/drop in seq start/stop */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632) drop_client(clp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635)
/* Read-only seq_file interface for the per-client "states" file. */
static const struct file_operations client_states_fops = {
	.open = client_states_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = client_opens_release,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644) * Normally we refuse to destroy clients that are in use, but here the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645) * administrator is telling us to just do it. We also want to wait
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646) * so the caller has a guarantee that the client's locks are gone by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) * the time the write returns:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648) */
static void force_expire_client(struct nfs4_client *clp)
{
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
	bool already_expired;

	/* Zero cl_time so the client is treated as already expired. */
	spin_lock(&nn->client_lock);
	clp->cl_time = 0;
	spin_unlock(&nn->client_lock);

	/* Wait until no rpc is actively using this client. */
	wait_event(expiry_wq, atomic_read(&clp->cl_rpc_users) == 0);
	spin_lock(&nn->client_lock);
	/* An empty cl_lru means someone else already unhashed it. */
	already_expired = list_empty(&clp->cl_lru);
	if (!already_expired)
		unhash_client_locked(clp);
	spin_unlock(&nn->client_lock);

	if (!already_expired)
		expire_client(clp);
	else
		/* Expiry in progress elsewhere; wait for it to finish. */
		wait_event(expiry_wq, clp->cl_nfsd_dentry == NULL);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671) static ssize_t client_ctl_write(struct file *file, const char __user *buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672) size_t size, loff_t *pos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674) char *data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675) struct nfs4_client *clp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677) data = simple_transaction_get(file, buf, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678) if (IS_ERR(data))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679) return PTR_ERR(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680) if (size != 7 || 0 != memcmp(data, "expire\n", 7))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682) clp = get_nfsdfs_clp(file_inode(file));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683) if (!clp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684) return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685) force_expire_client(clp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686) drop_client(clp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687) return 7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689)
/* Write-only transaction file accepting the "expire\n" command. */
static const struct file_operations client_ctl_fops = {
	.write = client_ctl_write,
	.release = simple_transaction_release,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694)
/* Files created inside each per-client nfsdfs directory. */
static const struct tree_descr client_files[] = {
	[0] = {"info", &client_info_fops, S_IRUSR},
	[1] = {"states", &client_states_fops, S_IRUSR},
	[2] = {"ctl", &client_ctl_fops, S_IWUSR},
	[3] = {""},	/* terminator */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702) static struct nfs4_client *create_client(struct xdr_netobj name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703) struct svc_rqst *rqstp, nfs4_verifier *verf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705) struct nfs4_client *clp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706) struct sockaddr *sa = svc_addr(rqstp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708) struct net *net = SVC_NET(rqstp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709) struct nfsd_net *nn = net_generic(net, nfsd_net_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711) clp = alloc_client(name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712) if (clp == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715) ret = copy_cred(&clp->cl_cred, &rqstp->rq_cred);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717) free_client(clp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720) gen_clid(clp, nn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721) kref_init(&clp->cl_nfsdfs.cl_ref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722) nfsd4_init_cb(&clp->cl_cb_null, clp, NULL, NFSPROC4_CLNT_CB_NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723) clp->cl_time = ktime_get_boottime_seconds();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724) clear_bit(0, &clp->cl_cb_slot_busy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725) copy_verf(clp, verf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726) memcpy(&clp->cl_addr, sa, sizeof(struct sockaddr_storage));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727) clp->cl_cb_session = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728) clp->net = net;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729) clp->cl_nfsd_dentry = nfsd_client_mkdir(nn, &clp->cl_nfsdfs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730) clp->cl_clientid.cl_id - nn->clientid_base,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731) client_files);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732) if (!clp->cl_nfsd_dentry) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733) free_client(clp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736) return clp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740) add_clp_to_name_tree(struct nfs4_client *new_clp, struct rb_root *root)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742) struct rb_node **new = &(root->rb_node), *parent = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743) struct nfs4_client *clp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745) while (*new) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746) clp = rb_entry(*new, struct nfs4_client, cl_namenode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747) parent = *new;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749) if (compare_blob(&clp->cl_name, &new_clp->cl_name) > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750) new = &((*new)->rb_left);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752) new = &((*new)->rb_right);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755) rb_link_node(&new_clp->cl_namenode, parent, new);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756) rb_insert_color(&new_clp->cl_namenode, root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759) static struct nfs4_client *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760) find_clp_in_name_tree(struct xdr_netobj *name, struct rb_root *root)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762) int cmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763) struct rb_node *node = root->rb_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764) struct nfs4_client *clp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2765)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2766) while (node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767) clp = rb_entry(node, struct nfs4_client, cl_namenode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768) cmp = compare_blob(&clp->cl_name, name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769) if (cmp > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770) node = node->rb_left;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771) else if (cmp < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772) node = node->rb_right;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774) return clp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2775) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2776) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2777) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2779) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2780) add_to_unconfirmed(struct nfs4_client *clp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2781) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2782) unsigned int idhashval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783) struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2784)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2785) lockdep_assert_held(&nn->client_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2786)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787) clear_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788) add_clp_to_name_tree(clp, &nn->unconf_name_tree);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789) idhashval = clientid_hashval(clp->cl_clientid.cl_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790) list_add(&clp->cl_idhash, &nn->unconf_id_hashtbl[idhashval]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791) renew_client_locked(clp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2792) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2793)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2794) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2795) move_to_confirmed(struct nfs4_client *clp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2796) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2797) unsigned int idhashval = clientid_hashval(clp->cl_clientid.cl_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2798) struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2799)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2800) lockdep_assert_held(&nn->client_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2801)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2802) dprintk("NFSD: move_to_confirm nfs4_client %p\n", clp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2803) list_move(&clp->cl_idhash, &nn->conf_id_hashtbl[idhashval]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2804) rb_erase(&clp->cl_namenode, &nn->unconf_name_tree);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2805) add_clp_to_name_tree(clp, &nn->conf_name_tree);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2806) set_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2807) renew_client_locked(clp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2808) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2809)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2810) static struct nfs4_client *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2811) find_client_in_id_table(struct list_head *tbl, clientid_t *clid, bool sessions)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2812) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2813) struct nfs4_client *clp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2814) unsigned int idhashval = clientid_hashval(clid->cl_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2815)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2816) list_for_each_entry(clp, &tbl[idhashval], cl_idhash) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2817) if (same_clid(&clp->cl_clientid, clid)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2818) if ((bool)clp->cl_minorversion != sessions)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2819) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2820) renew_client_locked(clp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2821) return clp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2822) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2823) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2824) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2825) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2826)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2827) static struct nfs4_client *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2828) find_confirmed_client(clientid_t *clid, bool sessions, struct nfsd_net *nn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2829) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2830) struct list_head *tbl = nn->conf_id_hashtbl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2831)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2832) lockdep_assert_held(&nn->client_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2833) return find_client_in_id_table(tbl, clid, sessions);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2834) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2835)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2836) static struct nfs4_client *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2837) find_unconfirmed_client(clientid_t *clid, bool sessions, struct nfsd_net *nn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2838) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2839) struct list_head *tbl = nn->unconf_id_hashtbl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2840)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2841) lockdep_assert_held(&nn->client_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2842) return find_client_in_id_table(tbl, clid, sessions);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2843) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2844)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2845) static bool clp_used_exchangeid(struct nfs4_client *clp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2846) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2847) return clp->cl_exchange_flags != 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2848) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2849)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2850) static struct nfs4_client *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2851) find_confirmed_client_by_name(struct xdr_netobj *name, struct nfsd_net *nn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2852) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2853) lockdep_assert_held(&nn->client_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2854) return find_clp_in_name_tree(name, &nn->conf_name_tree);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2855) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2856)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2857) static struct nfs4_client *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2858) find_unconfirmed_client_by_name(struct xdr_netobj *name, struct nfsd_net *nn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2859) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2860) lockdep_assert_held(&nn->client_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2861) return find_clp_in_name_tree(name, &nn->unconf_name_tree);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2862) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2863)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2864) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2865) gen_callback(struct nfs4_client *clp, struct nfsd4_setclientid *se, struct svc_rqst *rqstp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2866) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2867) struct nfs4_cb_conn *conn = &clp->cl_cb_conn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2868) struct sockaddr *sa = svc_addr(rqstp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2869) u32 scopeid = rpc_get_scope_id(sa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2870) unsigned short expected_family;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2871)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2872) /* Currently, we only support tcp and tcp6 for the callback channel */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2873) if (se->se_callback_netid_len == 3 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2874) !memcmp(se->se_callback_netid_val, "tcp", 3))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2875) expected_family = AF_INET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2876) else if (se->se_callback_netid_len == 4 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2877) !memcmp(se->se_callback_netid_val, "tcp6", 4))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2878) expected_family = AF_INET6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2879) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2880) goto out_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2881)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2882) conn->cb_addrlen = rpc_uaddr2sockaddr(clp->net, se->se_callback_addr_val,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2883) se->se_callback_addr_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2884) (struct sockaddr *)&conn->cb_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2885) sizeof(conn->cb_addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2886)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2887) if (!conn->cb_addrlen || conn->cb_addr.ss_family != expected_family)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2888) goto out_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2889)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2890) if (conn->cb_addr.ss_family == AF_INET6)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2891) ((struct sockaddr_in6 *)&conn->cb_addr)->sin6_scope_id = scopeid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2892)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2893) conn->cb_prog = se->se_callback_prog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2894) conn->cb_ident = se->se_callback_ident;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2895) memcpy(&conn->cb_saddr, &rqstp->rq_daddr, rqstp->rq_daddrlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2896) trace_nfsd_cb_args(clp, conn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2897) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2898) out_err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2899) conn->cb_addr.ss_family = AF_UNSPEC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2900) conn->cb_addrlen = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2901) trace_nfsd_cb_nodelegs(clp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2902) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2903) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2904)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2905) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2906) * Cache a reply. nfsd4_check_resp_size() has bounded the cache size.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2907) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2908) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2909) nfsd4_store_cache_entry(struct nfsd4_compoundres *resp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2910) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2911) struct xdr_buf *buf = resp->xdr.buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2912) struct nfsd4_slot *slot = resp->cstate.slot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2913) unsigned int base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2914)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2915) dprintk("--> %s slot %p\n", __func__, slot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2916)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2917) slot->sl_flags |= NFSD4_SLOT_INITIALIZED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2918) slot->sl_opcnt = resp->opcnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2919) slot->sl_status = resp->cstate.status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2920) free_svc_cred(&slot->sl_cred);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2921) copy_cred(&slot->sl_cred, &resp->rqstp->rq_cred);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2922)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2923) if (!nfsd4_cache_this(resp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2924) slot->sl_flags &= ~NFSD4_SLOT_CACHED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2925) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2926) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2927) slot->sl_flags |= NFSD4_SLOT_CACHED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2928)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2929) base = resp->cstate.data_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2930) slot->sl_datalen = buf->len - base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2931) if (read_bytes_from_xdr_buf(buf, base, slot->sl_data, slot->sl_datalen))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2932) WARN(1, "%s: sessions DRC could not cache compound\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2933) __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2934) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2935) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2936)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2937) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2938) * Encode the replay sequence operation from the slot values.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2939) * If cachethis is FALSE encode the uncached rep error on the next
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2940) * operation which sets resp->p and increments resp->opcnt for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2941) * nfs4svc_encode_compoundres.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2942) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2943) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2944) static __be32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2945) nfsd4_enc_sequence_replay(struct nfsd4_compoundargs *args,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2946) struct nfsd4_compoundres *resp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2947) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2948) struct nfsd4_op *op;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2949) struct nfsd4_slot *slot = resp->cstate.slot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2950)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2951) /* Encode the replayed sequence operation */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2952) op = &args->ops[resp->opcnt - 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2953) nfsd4_encode_operation(resp, op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2954)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2955) if (slot->sl_flags & NFSD4_SLOT_CACHED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2956) return op->status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2957) if (args->opcnt == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2958) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2959) * The original operation wasn't a solo sequence--we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2960) * always cache those--so this retry must not match the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2961) * original:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2962) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2963) op->status = nfserr_seq_false_retry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2964) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2965) op = &args->ops[resp->opcnt++];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2966) op->status = nfserr_retry_uncached_rep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2967) nfsd4_encode_operation(resp, op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2968) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2969) return op->status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2970) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2971)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2972) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2973) * The sequence operation is not cached because we can use the slot and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2974) * session values.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2975) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2976) static __be32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2977) nfsd4_replay_cache_entry(struct nfsd4_compoundres *resp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2978) struct nfsd4_sequence *seq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2979) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2980) struct nfsd4_slot *slot = resp->cstate.slot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2981) struct xdr_stream *xdr = &resp->xdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2982) __be32 *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2983) __be32 status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2984)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2985) dprintk("--> %s slot %p\n", __func__, slot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2986)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2987) status = nfsd4_enc_sequence_replay(resp->rqstp->rq_argp, resp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2988) if (status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2989) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2990)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2991) p = xdr_reserve_space(xdr, slot->sl_datalen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2992) if (!p) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2993) WARN_ON_ONCE(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2994) return nfserr_serverfault;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2995) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2996) xdr_encode_opaque_fixed(p, slot->sl_data, slot->sl_datalen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2997) xdr_commit_encode(xdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2998)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2999) resp->opcnt = slot->sl_opcnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3000) return slot->sl_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3001) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3002)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3003) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3004) * Set the exchange_id flags returned by the server.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3005) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3006) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3007) nfsd4_set_ex_flags(struct nfs4_client *new, struct nfsd4_exchange_id *clid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3008) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3009) #ifdef CONFIG_NFSD_PNFS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3010) new->cl_exchange_flags |= EXCHGID4_FLAG_USE_PNFS_MDS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3011) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3012) new->cl_exchange_flags |= EXCHGID4_FLAG_USE_NON_PNFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3013) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3014)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3015) /* Referrals are supported, Migration is not. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3016) new->cl_exchange_flags |= EXCHGID4_FLAG_SUPP_MOVED_REFER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3017)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3018) /* set the wire flags to return to client. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3019) clid->flags = new->cl_exchange_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3020) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3021)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3022) static bool client_has_openowners(struct nfs4_client *clp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3023) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3024) struct nfs4_openowner *oo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3025)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3026) list_for_each_entry(oo, &clp->cl_openowners, oo_perclient) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3027) if (!list_empty(&oo->oo_owner.so_stateids))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3028) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3029) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3030) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3031) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3032)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3033) static bool client_has_state(struct nfs4_client *clp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3034) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3035) return client_has_openowners(clp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3036) #ifdef CONFIG_NFSD_PNFS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3037) || !list_empty(&clp->cl_lo_states)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3038) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3039) || !list_empty(&clp->cl_delegations)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3040) || !list_empty(&clp->cl_sessions)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3041) || !list_empty(&clp->async_copies);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3042) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3043)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3044) static __be32 copy_impl_id(struct nfs4_client *clp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3045) struct nfsd4_exchange_id *exid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3046) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3047) if (!exid->nii_domain.data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3048) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3049) xdr_netobj_dup(&clp->cl_nii_domain, &exid->nii_domain, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3050) if (!clp->cl_nii_domain.data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3051) return nfserr_jukebox;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3052) xdr_netobj_dup(&clp->cl_nii_name, &exid->nii_name, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3053) if (!clp->cl_nii_name.data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3054) return nfserr_jukebox;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3055) clp->cl_nii_time = exid->nii_time;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3056) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3057) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3058)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3059) __be32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3060) nfsd4_exchange_id(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3061) union nfsd4_op_u *u)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3062) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3063) struct nfsd4_exchange_id *exid = &u->exchange_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3064) struct nfs4_client *conf, *new;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3065) struct nfs4_client *unconf = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3066) __be32 status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3067) char addr_str[INET6_ADDRSTRLEN];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3068) nfs4_verifier verf = exid->verifier;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3069) struct sockaddr *sa = svc_addr(rqstp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3070) bool update = exid->flags & EXCHGID4_FLAG_UPD_CONFIRMED_REC_A;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3071) struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3072)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3073) rpc_ntop(sa, addr_str, sizeof(addr_str));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3074) dprintk("%s rqstp=%p exid=%p clname.len=%u clname.data=%p "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3075) "ip_addr=%s flags %x, spa_how %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3076) __func__, rqstp, exid, exid->clname.len, exid->clname.data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3077) addr_str, exid->flags, exid->spa_how);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3078)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3079) if (exid->flags & ~EXCHGID4_FLAG_MASK_A)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3080) return nfserr_inval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3081)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3082) new = create_client(exid->clname, rqstp, &verf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3083) if (new == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3084) return nfserr_jukebox;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3085) status = copy_impl_id(new, exid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3086) if (status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3087) goto out_nolock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3088)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3089) switch (exid->spa_how) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3090) case SP4_MACH_CRED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3091) exid->spo_must_enforce[0] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3092) exid->spo_must_enforce[1] = (
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3093) 1 << (OP_BIND_CONN_TO_SESSION - 32) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3094) 1 << (OP_EXCHANGE_ID - 32) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3095) 1 << (OP_CREATE_SESSION - 32) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3096) 1 << (OP_DESTROY_SESSION - 32) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3097) 1 << (OP_DESTROY_CLIENTID - 32));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3098)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3099) exid->spo_must_allow[0] &= (1 << (OP_CLOSE) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3100) 1 << (OP_OPEN_DOWNGRADE) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3101) 1 << (OP_LOCKU) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3102) 1 << (OP_DELEGRETURN));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3103)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3104) exid->spo_must_allow[1] &= (
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3105) 1 << (OP_TEST_STATEID - 32) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3106) 1 << (OP_FREE_STATEID - 32));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3107) if (!svc_rqst_integrity_protected(rqstp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3108) status = nfserr_inval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3109) goto out_nolock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3110) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3111) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3112) * Sometimes userspace doesn't give us a principal.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3113) * Which is a bug, really. Anyway, we can't enforce
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3114) * MACH_CRED in that case, better to give up now:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3115) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3116) if (!new->cl_cred.cr_principal &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3117) !new->cl_cred.cr_raw_principal) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3118) status = nfserr_serverfault;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3119) goto out_nolock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3120) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3121) new->cl_mach_cred = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3122) case SP4_NONE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3123) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3124) default: /* checked by xdr code */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3125) WARN_ON_ONCE(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3126) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3127) case SP4_SSV:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3128) status = nfserr_encr_alg_unsupp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3129) goto out_nolock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3130) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3131)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3132) /* Cases below refer to rfc 5661 section 18.35.4: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3133) spin_lock(&nn->client_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3134) conf = find_confirmed_client_by_name(&exid->clname, nn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3135) if (conf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3136) bool creds_match = same_creds(&conf->cl_cred, &rqstp->rq_cred);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3137) bool verfs_match = same_verf(&verf, &conf->cl_verifier);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3138)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3139) if (update) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3140) if (!clp_used_exchangeid(conf)) { /* buggy client */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3141) status = nfserr_inval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3142) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3143) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3144) if (!nfsd4_mach_creds_match(conf, rqstp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3145) status = nfserr_wrong_cred;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3146) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3147) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3148) if (!creds_match) { /* case 9 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3149) status = nfserr_perm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3150) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3151) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3152) if (!verfs_match) { /* case 8 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3153) status = nfserr_not_same;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3154) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3155) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3156) /* case 6 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3157) exid->flags |= EXCHGID4_FLAG_CONFIRMED_R;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3158) goto out_copy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3159) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3160) if (!creds_match) { /* case 3 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3161) if (client_has_state(conf)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3162) status = nfserr_clid_inuse;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3163) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3164) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3165) goto out_new;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3166) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3167) if (verfs_match) { /* case 2 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3168) conf->cl_exchange_flags |= EXCHGID4_FLAG_CONFIRMED_R;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3169) goto out_copy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3170) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3171) /* case 5, client reboot */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3172) conf = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3173) goto out_new;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3174) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3175)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3176) if (update) { /* case 7 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3177) status = nfserr_noent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3178) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3179) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3180)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3181) unconf = find_unconfirmed_client_by_name(&exid->clname, nn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3182) if (unconf) /* case 4, possible retry or client restart */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3183) unhash_client_locked(unconf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3184)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3185) /* case 1 (normal case) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3186) out_new:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3187) if (conf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3188) status = mark_client_expired_locked(conf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3189) if (status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3190) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3191) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3192) new->cl_minorversion = cstate->minorversion;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3193) new->cl_spo_must_allow.u.words[0] = exid->spo_must_allow[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3194) new->cl_spo_must_allow.u.words[1] = exid->spo_must_allow[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3195)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3196) add_to_unconfirmed(new);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3197) swap(new, conf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3198) out_copy:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3199) exid->clientid.cl_boot = conf->cl_clientid.cl_boot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3200) exid->clientid.cl_id = conf->cl_clientid.cl_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3201)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3202) exid->seqid = conf->cl_cs_slot.sl_seqid + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3203) nfsd4_set_ex_flags(conf, exid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3204)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3205) dprintk("nfsd4_exchange_id seqid %d flags %x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3206) conf->cl_cs_slot.sl_seqid, conf->cl_exchange_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3207) status = nfs_ok;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3208)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3209) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3210) spin_unlock(&nn->client_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3211) out_nolock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3212) if (new)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3213) expire_client(new);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3214) if (unconf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3215) expire_client(unconf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3216) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3217) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3218)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3219) static __be32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3220) check_slot_seqid(u32 seqid, u32 slot_seqid, int slot_inuse)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3221) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3222) dprintk("%s enter. seqid %d slot_seqid %d\n", __func__, seqid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3223) slot_seqid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3224)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3225) /* The slot is in use, and no response has been sent. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3226) if (slot_inuse) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3227) if (seqid == slot_seqid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3228) return nfserr_jukebox;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3229) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3230) return nfserr_seq_misordered;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3231) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3232) /* Note unsigned 32-bit arithmetic handles wraparound: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3233) if (likely(seqid == slot_seqid + 1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3234) return nfs_ok;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3235) if (seqid == slot_seqid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3236) return nfserr_replay_cache;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3237) return nfserr_seq_misordered;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3238) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3239)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3240) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3241) * Cache the create session result into the create session single DRC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3242) * slot cache by saving the xdr structure. sl_seqid has been set.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3243) * Do this for solo or embedded create session operations.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3244) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3245) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3246) nfsd4_cache_create_session(struct nfsd4_create_session *cr_ses,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3247) struct nfsd4_clid_slot *slot, __be32 nfserr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3248) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3249) slot->sl_status = nfserr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3250) memcpy(&slot->sl_cr_ses, cr_ses, sizeof(*cr_ses));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3251) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3252)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3253) static __be32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3254) nfsd4_replay_create_session(struct nfsd4_create_session *cr_ses,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3255) struct nfsd4_clid_slot *slot)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3256) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3257) memcpy(cr_ses, &slot->sl_cr_ses, sizeof(*cr_ses));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3258) return slot->sl_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3259) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3260)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3261) #define NFSD_MIN_REQ_HDR_SEQ_SZ ((\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3262) 2 * 2 + /* credential,verifier: AUTH_NULL, length 0 */ \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3263) 1 + /* MIN tag is length with zero, only length */ \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3264) 3 + /* version, opcount, opcode */ \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3265) XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3266) /* seqid, slotID, slotID, cache */ \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3267) 4 ) * sizeof(__be32))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3268)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3269) #define NFSD_MIN_RESP_HDR_SEQ_SZ ((\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3270) 2 + /* verifier: AUTH_NULL, length 0 */\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3271) 1 + /* status */ \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3272) 1 + /* MIN tag is length with zero, only length */ \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3273) 3 + /* opcount, opcode, opstatus*/ \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3274) XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3275) /* seqid, slotID, slotID, slotID, status */ \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3276) 5 ) * sizeof(__be32))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3277)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3278) static __be32 check_forechannel_attrs(struct nfsd4_channel_attrs *ca, struct nfsd_net *nn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3279) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3280) u32 maxrpc = nn->nfsd_serv->sv_max_mesg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3281)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3282) if (ca->maxreq_sz < NFSD_MIN_REQ_HDR_SEQ_SZ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3283) return nfserr_toosmall;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3284) if (ca->maxresp_sz < NFSD_MIN_RESP_HDR_SEQ_SZ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3285) return nfserr_toosmall;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3286) ca->headerpadsz = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3287) ca->maxreq_sz = min_t(u32, ca->maxreq_sz, maxrpc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3288) ca->maxresp_sz = min_t(u32, ca->maxresp_sz, maxrpc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3289) ca->maxops = min_t(u32, ca->maxops, NFSD_MAX_OPS_PER_COMPOUND);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3290) ca->maxresp_cached = min_t(u32, ca->maxresp_cached,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3291) NFSD_SLOT_CACHE_SIZE + NFSD_MIN_HDR_SEQ_SZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3292) ca->maxreqs = min_t(u32, ca->maxreqs, NFSD_MAX_SLOTS_PER_SESSION);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3293) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3294) * Note decreasing slot size below client's request may make it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3295) * difficult for client to function correctly, whereas
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3296) * decreasing the number of slots will (just?) affect
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3297) * performance. When short on memory we therefore prefer to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3298) * decrease number of slots instead of their size. Clients that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3299) * request larger slots than they need will get poor results:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3300) * Note that we always allow at least one slot, because our
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3301) * accounting is soft and provides no guarantees either way.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3302) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3303) ca->maxreqs = nfsd4_get_drc_mem(ca, nn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3304)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3305) return nfs_ok;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3306) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3307)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3308) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3309) * Server's NFSv4.1 backchannel support is AUTH_SYS-only for now.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3310) * These are based on similar macros in linux/sunrpc/msg_prot.h .
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3311) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3312) #define RPC_MAX_HEADER_WITH_AUTH_SYS \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3313) (RPC_CALLHDRSIZE + 2 * (2 + UNX_CALLSLACK))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3314)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3315) #define RPC_MAX_REPHEADER_WITH_AUTH_SYS \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3316) (RPC_REPHDRSIZE + (2 + NUL_REPLYSLACK))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3317)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3318) #define NFSD_CB_MAX_REQ_SZ ((NFS4_enc_cb_recall_sz + \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3319) RPC_MAX_HEADER_WITH_AUTH_SYS) * sizeof(__be32))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3320) #define NFSD_CB_MAX_RESP_SZ ((NFS4_dec_cb_recall_sz + \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3321) RPC_MAX_REPHEADER_WITH_AUTH_SYS) * \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3322) sizeof(__be32))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3323)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3324) static __be32 check_backchannel_attrs(struct nfsd4_channel_attrs *ca)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3325) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3326) ca->headerpadsz = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3327)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3328) if (ca->maxreq_sz < NFSD_CB_MAX_REQ_SZ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3329) return nfserr_toosmall;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3330) if (ca->maxresp_sz < NFSD_CB_MAX_RESP_SZ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3331) return nfserr_toosmall;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3332) ca->maxresp_cached = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3333) if (ca->maxops < 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3334) return nfserr_toosmall;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3335)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3336) return nfs_ok;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3337) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3338)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3339) static __be32 nfsd4_check_cb_sec(struct nfsd4_cb_sec *cbs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3340) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3341) switch (cbs->flavor) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3342) case RPC_AUTH_NULL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3343) case RPC_AUTH_UNIX:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3344) return nfs_ok;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3345) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3346) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3347) * GSS case: the spec doesn't allow us to return this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3348) * error. But it also doesn't allow us not to support
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3349) * GSS.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3350) * I'd rather this fail hard than return some error the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3351) * client might think it can already handle:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3352) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3353) return nfserr_encr_alg_unsupp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3354) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3355) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3356)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3357) __be32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3358) nfsd4_create_session(struct svc_rqst *rqstp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3359) struct nfsd4_compound_state *cstate, union nfsd4_op_u *u)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3360) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3361) struct nfsd4_create_session *cr_ses = &u->create_session;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3362) struct sockaddr *sa = svc_addr(rqstp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3363) struct nfs4_client *conf, *unconf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3364) struct nfs4_client *old = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3365) struct nfsd4_session *new;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3366) struct nfsd4_conn *conn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3367) struct nfsd4_clid_slot *cs_slot = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3368) __be32 status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3369) struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3370)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3371) if (cr_ses->flags & ~SESSION4_FLAG_MASK_A)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3372) return nfserr_inval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3373) status = nfsd4_check_cb_sec(&cr_ses->cb_sec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3374) if (status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3375) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3376) status = check_forechannel_attrs(&cr_ses->fore_channel, nn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3377) if (status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3378) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3379) status = check_backchannel_attrs(&cr_ses->back_channel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3380) if (status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3381) goto out_release_drc_mem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3382) status = nfserr_jukebox;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3383) new = alloc_session(&cr_ses->fore_channel, &cr_ses->back_channel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3384) if (!new)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3385) goto out_release_drc_mem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3386) conn = alloc_conn_from_crses(rqstp, cr_ses);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3387) if (!conn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3388) goto out_free_session;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3389)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3390) spin_lock(&nn->client_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3391) unconf = find_unconfirmed_client(&cr_ses->clientid, true, nn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3392) conf = find_confirmed_client(&cr_ses->clientid, true, nn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3393) WARN_ON_ONCE(conf && unconf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3394)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3395) if (conf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3396) status = nfserr_wrong_cred;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3397) if (!nfsd4_mach_creds_match(conf, rqstp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3398) goto out_free_conn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3399) cs_slot = &conf->cl_cs_slot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3400) status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3401) if (status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3402) if (status == nfserr_replay_cache)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3403) status = nfsd4_replay_create_session(cr_ses, cs_slot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3404) goto out_free_conn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3405) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3406) } else if (unconf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3407) if (!same_creds(&unconf->cl_cred, &rqstp->rq_cred) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3408) !rpc_cmp_addr(sa, (struct sockaddr *) &unconf->cl_addr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3409) status = nfserr_clid_inuse;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3410) goto out_free_conn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3411) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3412) status = nfserr_wrong_cred;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3413) if (!nfsd4_mach_creds_match(unconf, rqstp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3414) goto out_free_conn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3415) cs_slot = &unconf->cl_cs_slot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3416) status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3417) if (status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3418) /* an unconfirmed replay returns misordered */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3419) status = nfserr_seq_misordered;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3420) goto out_free_conn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3421) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3422) old = find_confirmed_client_by_name(&unconf->cl_name, nn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3423) if (old) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3424) status = mark_client_expired_locked(old);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3425) if (status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3426) old = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3427) goto out_free_conn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3428) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3429) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3430) move_to_confirmed(unconf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3431) conf = unconf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3432) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3433) status = nfserr_stale_clientid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3434) goto out_free_conn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3435) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3436) status = nfs_ok;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3437) /* Persistent sessions are not supported */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3438) cr_ses->flags &= ~SESSION4_PERSIST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3439) /* Upshifting from TCP to RDMA is not supported */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3440) cr_ses->flags &= ~SESSION4_RDMA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3441)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3442) init_session(rqstp, new, conf, cr_ses);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3443) nfsd4_get_session_locked(new);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3444)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3445) memcpy(cr_ses->sessionid.data, new->se_sessionid.data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3446) NFS4_MAX_SESSIONID_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3447) cs_slot->sl_seqid++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3448) cr_ses->seqid = cs_slot->sl_seqid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3449)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3450) /* cache solo and embedded create sessions under the client_lock */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3451) nfsd4_cache_create_session(cr_ses, cs_slot, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3452) spin_unlock(&nn->client_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3453) /* init connection and backchannel */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3454) nfsd4_init_conn(rqstp, conn, new);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3455) nfsd4_put_session(new);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3456) if (old)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3457) expire_client(old);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3458) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3459) out_free_conn:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3460) spin_unlock(&nn->client_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3461) free_conn(conn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3462) if (old)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3463) expire_client(old);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3464) out_free_session:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3465) __free_session(new);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3466) out_release_drc_mem:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3467) nfsd4_put_drc_mem(&cr_ses->fore_channel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3468) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3469) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3470)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3471) static __be32 nfsd4_map_bcts_dir(u32 *dir)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3472) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3473) switch (*dir) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3474) case NFS4_CDFC4_FORE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3475) case NFS4_CDFC4_BACK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3476) return nfs_ok;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3477) case NFS4_CDFC4_FORE_OR_BOTH:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3478) case NFS4_CDFC4_BACK_OR_BOTH:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3479) *dir = NFS4_CDFC4_BOTH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3480) return nfs_ok;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3481) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3482) return nfserr_inval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3483) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3484)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3485) __be32 nfsd4_backchannel_ctl(struct svc_rqst *rqstp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3486) struct nfsd4_compound_state *cstate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3487) union nfsd4_op_u *u)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3488) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3489) struct nfsd4_backchannel_ctl *bc = &u->backchannel_ctl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3490) struct nfsd4_session *session = cstate->session;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3491) struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3492) __be32 status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3493)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3494) status = nfsd4_check_cb_sec(&bc->bc_cb_sec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3495) if (status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3496) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3497) spin_lock(&nn->client_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3498) session->se_cb_prog = bc->bc_cb_program;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3499) session->se_cb_sec = bc->bc_cb_sec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3500) spin_unlock(&nn->client_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3501)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3502) nfsd4_probe_callback(session->se_client);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3503)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3504) return nfs_ok;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3505) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3506)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3507) static struct nfsd4_conn *__nfsd4_find_conn(struct svc_xprt *xpt, struct nfsd4_session *s)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3508) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3509) struct nfsd4_conn *c;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3510)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3511) list_for_each_entry(c, &s->se_conns, cn_persession) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3512) if (c->cn_xprt == xpt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3513) return c;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3514) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3515) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3516) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3517) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3518)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3519) static __be32 nfsd4_match_existing_connection(struct svc_rqst *rqst,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3520) struct nfsd4_session *session, u32 req, struct nfsd4_conn **conn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3521) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3522) struct nfs4_client *clp = session->se_client;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3523) struct svc_xprt *xpt = rqst->rq_xprt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3524) struct nfsd4_conn *c;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3525) __be32 status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3526)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3527) /* Following the last paragraph of RFC 5661 Section 18.34.3: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3528) spin_lock(&clp->cl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3529) c = __nfsd4_find_conn(xpt, session);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3530) if (!c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3531) status = nfserr_noent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3532) else if (req == c->cn_flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3533) status = nfs_ok;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3534) else if (req == NFS4_CDFC4_FORE_OR_BOTH &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3535) c->cn_flags != NFS4_CDFC4_BACK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3536) status = nfs_ok;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3537) else if (req == NFS4_CDFC4_BACK_OR_BOTH &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3538) c->cn_flags != NFS4_CDFC4_FORE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3539) status = nfs_ok;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3540) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3541) status = nfserr_inval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3542) spin_unlock(&clp->cl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3543) if (status == nfs_ok && conn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3544) *conn = c;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3545) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3546) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3547)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3548) __be32 nfsd4_bind_conn_to_session(struct svc_rqst *rqstp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3549) struct nfsd4_compound_state *cstate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3550) union nfsd4_op_u *u)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3551) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3552) struct nfsd4_bind_conn_to_session *bcts = &u->bind_conn_to_session;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3553) __be32 status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3554) struct nfsd4_conn *conn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3555) struct nfsd4_session *session;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3556) struct net *net = SVC_NET(rqstp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3557) struct nfsd_net *nn = net_generic(net, nfsd_net_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3558)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3559) if (!nfsd4_last_compound_op(rqstp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3560) return nfserr_not_only_op;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3561) spin_lock(&nn->client_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3562) session = find_in_sessionid_hashtbl(&bcts->sessionid, net, &status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3563) spin_unlock(&nn->client_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3564) if (!session)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3565) goto out_no_session;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3566) status = nfserr_wrong_cred;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3567) if (!nfsd4_mach_creds_match(session->se_client, rqstp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3568) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3569) status = nfsd4_match_existing_connection(rqstp, session,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3570) bcts->dir, &conn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3571) if (status == nfs_ok) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3572) if (bcts->dir == NFS4_CDFC4_FORE_OR_BOTH ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3573) bcts->dir == NFS4_CDFC4_BACK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3574) conn->cn_flags |= NFS4_CDFC4_BACK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3575) nfsd4_probe_callback(session->se_client);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3576) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3577) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3578) if (status == nfserr_inval)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3579) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3580) status = nfsd4_map_bcts_dir(&bcts->dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3581) if (status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3582) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3583) conn = alloc_conn(rqstp, bcts->dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3584) status = nfserr_jukebox;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3585) if (!conn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3586) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3587) nfsd4_init_conn(rqstp, conn, session);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3588) status = nfs_ok;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3589) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3590) nfsd4_put_session(session);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3591) out_no_session:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3592) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3593) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3594)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3595) static bool nfsd4_compound_in_session(struct nfsd4_compound_state *cstate, struct nfs4_sessionid *sid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3596) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3597) if (!cstate->session)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3598) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3599) return !memcmp(sid, &cstate->session->se_sessionid, sizeof(*sid));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3600) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3601)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3602) __be32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3603) nfsd4_destroy_session(struct svc_rqst *r, struct nfsd4_compound_state *cstate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3604) union nfsd4_op_u *u)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3605) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3606) struct nfs4_sessionid *sessionid = &u->destroy_session.sessionid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3607) struct nfsd4_session *ses;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3608) __be32 status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3609) int ref_held_by_me = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3610) struct net *net = SVC_NET(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3611) struct nfsd_net *nn = net_generic(net, nfsd_net_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3612)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3613) status = nfserr_not_only_op;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3614) if (nfsd4_compound_in_session(cstate, sessionid)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3615) if (!nfsd4_last_compound_op(r))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3616) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3617) ref_held_by_me++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3618) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3619) dump_sessionid(__func__, sessionid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3620) spin_lock(&nn->client_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3621) ses = find_in_sessionid_hashtbl(sessionid, net, &status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3622) if (!ses)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3623) goto out_client_lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3624) status = nfserr_wrong_cred;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3625) if (!nfsd4_mach_creds_match(ses->se_client, r))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3626) goto out_put_session;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3627) status = mark_session_dead_locked(ses, 1 + ref_held_by_me);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3628) if (status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3629) goto out_put_session;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3630) unhash_session(ses);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3631) spin_unlock(&nn->client_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3632)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3633) nfsd4_probe_callback_sync(ses->se_client);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3634)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3635) spin_lock(&nn->client_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3636) status = nfs_ok;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3637) out_put_session:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3638) nfsd4_put_session_locked(ses);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3639) out_client_lock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3640) spin_unlock(&nn->client_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3641) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3642) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3643) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3644)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3645) static __be32 nfsd4_sequence_check_conn(struct nfsd4_conn *new, struct nfsd4_session *ses)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3646) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3647) struct nfs4_client *clp = ses->se_client;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3648) struct nfsd4_conn *c;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3649) __be32 status = nfs_ok;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3650) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3651)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3652) spin_lock(&clp->cl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3653) c = __nfsd4_find_conn(new->cn_xprt, ses);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3654) if (c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3655) goto out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3656) status = nfserr_conn_not_bound_to_session;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3657) if (clp->cl_mach_cred)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3658) goto out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3659) __nfsd4_hash_conn(new, ses);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3660) spin_unlock(&clp->cl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3661) ret = nfsd4_register_conn(new);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3662) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3663) /* oops; xprt is already down: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3664) nfsd4_conn_lost(&new->cn_xpt_user);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3665) return nfs_ok;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3666) out_free:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3667) spin_unlock(&clp->cl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3668) free_conn(new);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3669) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3670) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3671)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3672) static bool nfsd4_session_too_many_ops(struct svc_rqst *rqstp, struct nfsd4_session *session)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3673) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3674) struct nfsd4_compoundargs *args = rqstp->rq_argp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3675)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3676) return args->opcnt > session->se_fchannel.maxops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3677) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3678)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3679) static bool nfsd4_request_too_big(struct svc_rqst *rqstp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3680) struct nfsd4_session *session)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3681) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3682) struct xdr_buf *xb = &rqstp->rq_arg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3683)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3684) return xb->len > session->se_fchannel.maxreq_sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3685) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3686)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3687) static bool replay_matches_cache(struct svc_rqst *rqstp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3688) struct nfsd4_sequence *seq, struct nfsd4_slot *slot)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3689) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3690) struct nfsd4_compoundargs *argp = rqstp->rq_argp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3691)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3692) if ((bool)(slot->sl_flags & NFSD4_SLOT_CACHETHIS) !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3693) (bool)seq->cachethis)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3694) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3695) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3696) * If there's an error then the reply can have fewer ops than
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3697) * the call.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3698) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3699) if (slot->sl_opcnt < argp->opcnt && !slot->sl_status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3700) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3701) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3702) * But if we cached a reply with *more* ops than the call you're
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3703) * sending us now, then this new call is clearly not really a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3704) * replay of the old one:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3705) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3706) if (slot->sl_opcnt > argp->opcnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3707) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3708) /* This is the only check explicitly called by spec: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3709) if (!same_creds(&rqstp->rq_cred, &slot->sl_cred))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3710) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3711) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3712) * There may be more comparisons we could actually do, but the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3713) * spec doesn't require us to catch every case where the calls
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3714) * don't match (that would require caching the call as well as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3715) * the reply), so we don't bother.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3716) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3717) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3718) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3719)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3720) __be32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3721) nfsd4_sequence(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3722) union nfsd4_op_u *u)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3723) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3724) struct nfsd4_sequence *seq = &u->sequence;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3725) struct nfsd4_compoundres *resp = rqstp->rq_resp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3726) struct xdr_stream *xdr = &resp->xdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3727) struct nfsd4_session *session;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3728) struct nfs4_client *clp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3729) struct nfsd4_slot *slot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3730) struct nfsd4_conn *conn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3731) __be32 status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3732) int buflen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3733) struct net *net = SVC_NET(rqstp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3734) struct nfsd_net *nn = net_generic(net, nfsd_net_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3735)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3736) if (resp->opcnt != 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3737) return nfserr_sequence_pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3738)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3739) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3740) * Will be either used or freed by nfsd4_sequence_check_conn
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3741) * below.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3742) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3743) conn = alloc_conn(rqstp, NFS4_CDFC4_FORE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3744) if (!conn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3745) return nfserr_jukebox;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3746)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3747) spin_lock(&nn->client_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3748) session = find_in_sessionid_hashtbl(&seq->sessionid, net, &status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3749) if (!session)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3750) goto out_no_session;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3751) clp = session->se_client;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3752)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3753) status = nfserr_too_many_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3754) if (nfsd4_session_too_many_ops(rqstp, session))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3755) goto out_put_session;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3756)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3757) status = nfserr_req_too_big;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3758) if (nfsd4_request_too_big(rqstp, session))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3759) goto out_put_session;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3760)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3761) status = nfserr_badslot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3762) if (seq->slotid >= session->se_fchannel.maxreqs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3763) goto out_put_session;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3764)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3765) slot = session->se_slots[seq->slotid];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3766) dprintk("%s: slotid %d\n", __func__, seq->slotid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3767)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3768) /* We do not negotiate the number of slots yet, so set the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3769) * maxslots to the session maxreqs which is used to encode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3770) * sr_highest_slotid and the sr_target_slot id to maxslots */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3771) seq->maxslots = session->se_fchannel.maxreqs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3772)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3773) status = check_slot_seqid(seq->seqid, slot->sl_seqid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3774) slot->sl_flags & NFSD4_SLOT_INUSE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3775) if (status == nfserr_replay_cache) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3776) status = nfserr_seq_misordered;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3777) if (!(slot->sl_flags & NFSD4_SLOT_INITIALIZED))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3778) goto out_put_session;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3779) status = nfserr_seq_false_retry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3780) if (!replay_matches_cache(rqstp, seq, slot))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3781) goto out_put_session;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3782) cstate->slot = slot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3783) cstate->session = session;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3784) cstate->clp = clp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3785) /* Return the cached reply status and set cstate->status
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3786) * for nfsd4_proc_compound processing */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3787) status = nfsd4_replay_cache_entry(resp, seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3788) cstate->status = nfserr_replay_cache;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3789) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3790) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3791) if (status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3792) goto out_put_session;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3793)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3794) status = nfsd4_sequence_check_conn(conn, session);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3795) conn = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3796) if (status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3797) goto out_put_session;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3798)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3799) buflen = (seq->cachethis) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3800) session->se_fchannel.maxresp_cached :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3801) session->se_fchannel.maxresp_sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3802) status = (seq->cachethis) ? nfserr_rep_too_big_to_cache :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3803) nfserr_rep_too_big;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3804) if (xdr_restrict_buflen(xdr, buflen - rqstp->rq_auth_slack))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3805) goto out_put_session;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3806) svc_reserve(rqstp, buflen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3807)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3808) status = nfs_ok;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3809) /* Success! bump slot seqid */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3810) slot->sl_seqid = seq->seqid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3811) slot->sl_flags |= NFSD4_SLOT_INUSE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3812) if (seq->cachethis)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3813) slot->sl_flags |= NFSD4_SLOT_CACHETHIS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3814) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3815) slot->sl_flags &= ~NFSD4_SLOT_CACHETHIS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3816)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3817) cstate->slot = slot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3818) cstate->session = session;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3819) cstate->clp = clp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3820)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3821) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3822) switch (clp->cl_cb_state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3823) case NFSD4_CB_DOWN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3824) seq->status_flags = SEQ4_STATUS_CB_PATH_DOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3825) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3826) case NFSD4_CB_FAULT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3827) seq->status_flags = SEQ4_STATUS_BACKCHANNEL_FAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3828) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3829) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3830) seq->status_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3831) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3832) if (!list_empty(&clp->cl_revoked))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3833) seq->status_flags |= SEQ4_STATUS_RECALLABLE_STATE_REVOKED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3834) out_no_session:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3835) if (conn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3836) free_conn(conn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3837) spin_unlock(&nn->client_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3838) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3839) out_put_session:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3840) nfsd4_put_session_locked(session);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3841) goto out_no_session;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3842) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3843)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3844) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3845) nfsd4_sequence_done(struct nfsd4_compoundres *resp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3846) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3847) struct nfsd4_compound_state *cs = &resp->cstate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3848)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3849) if (nfsd4_has_session(cs)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3850) if (cs->status != nfserr_replay_cache) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3851) nfsd4_store_cache_entry(resp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3852) cs->slot->sl_flags &= ~NFSD4_SLOT_INUSE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3853) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3854) /* Drop session reference that was taken in nfsd4_sequence() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3855) nfsd4_put_session(cs->session);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3856) } else if (cs->clp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3857) put_client_renew(cs->clp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3858) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3859)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3860) __be32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3861) nfsd4_destroy_clientid(struct svc_rqst *rqstp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3862) struct nfsd4_compound_state *cstate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3863) union nfsd4_op_u *u)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3864) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3865) struct nfsd4_destroy_clientid *dc = &u->destroy_clientid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3866) struct nfs4_client *conf, *unconf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3867) struct nfs4_client *clp = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3868) __be32 status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3869) struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3870)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3871) spin_lock(&nn->client_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3872) unconf = find_unconfirmed_client(&dc->clientid, true, nn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3873) conf = find_confirmed_client(&dc->clientid, true, nn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3874) WARN_ON_ONCE(conf && unconf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3875)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3876) if (conf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3877) if (client_has_state(conf)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3878) status = nfserr_clientid_busy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3879) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3880) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3881) status = mark_client_expired_locked(conf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3882) if (status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3883) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3884) clp = conf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3885) } else if (unconf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3886) clp = unconf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3887) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3888) status = nfserr_stale_clientid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3889) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3890) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3891) if (!nfsd4_mach_creds_match(clp, rqstp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3892) clp = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3893) status = nfserr_wrong_cred;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3894) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3895) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3896) unhash_client_locked(clp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3897) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3898) spin_unlock(&nn->client_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3899) if (clp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3900) expire_client(clp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3901) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3902) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3903)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3904) __be32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3905) nfsd4_reclaim_complete(struct svc_rqst *rqstp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3906) struct nfsd4_compound_state *cstate, union nfsd4_op_u *u)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3907) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3908) struct nfsd4_reclaim_complete *rc = &u->reclaim_complete;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3909) __be32 status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3910)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3911) if (rc->rca_one_fs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3912) if (!cstate->current_fh.fh_dentry)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3913) return nfserr_nofilehandle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3914) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3915) * We don't take advantage of the rca_one_fs case.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3916) * That's OK, it's optional, we can safely ignore it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3917) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3918) return nfs_ok;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3919) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3920)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3921) status = nfserr_complete_already;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3922) if (test_and_set_bit(NFSD4_CLIENT_RECLAIM_COMPLETE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3923) &cstate->session->se_client->cl_flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3924) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3925)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3926) status = nfserr_stale_clientid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3927) if (is_client_expired(cstate->session->se_client))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3928) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3929) * The following error isn't really legal.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3930) * But we only get here if the client just explicitly
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3931) * destroyed the client. Surely it no longer cares what
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3932) * error it gets back on an operation for the dead
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3933) * client.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3934) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3935) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3936)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3937) status = nfs_ok;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3938) nfsd4_client_record_create(cstate->session->se_client);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3939) inc_reclaim_complete(cstate->session->se_client);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3940) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3941) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3942) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3943)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3944) __be32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3945) nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3946) union nfsd4_op_u *u)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3947) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3948) struct nfsd4_setclientid *setclid = &u->setclientid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3949) struct xdr_netobj clname = setclid->se_name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3950) nfs4_verifier clverifier = setclid->se_verf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3951) struct nfs4_client *conf, *new;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3952) struct nfs4_client *unconf = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3953) __be32 status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3954) struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3955)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3956) new = create_client(clname, rqstp, &clverifier);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3957) if (new == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3958) return nfserr_jukebox;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3959) /* Cases below refer to rfc 3530 section 14.2.33: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3960) spin_lock(&nn->client_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3961) conf = find_confirmed_client_by_name(&clname, nn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3962) if (conf && client_has_state(conf)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3963) /* case 0: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3964) status = nfserr_clid_inuse;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3965) if (clp_used_exchangeid(conf))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3966) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3967) if (!same_creds(&conf->cl_cred, &rqstp->rq_cred)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3968) trace_nfsd_clid_inuse_err(conf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3969) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3970) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3971) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3972) unconf = find_unconfirmed_client_by_name(&clname, nn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3973) if (unconf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3974) unhash_client_locked(unconf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3975) /* We need to handle only case 1: probable callback update */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3976) if (conf && same_verf(&conf->cl_verifier, &clverifier)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3977) copy_clid(new, conf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3978) gen_confirm(new, nn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3979) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3980) new->cl_minorversion = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3981) gen_callback(new, setclid, rqstp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3982) add_to_unconfirmed(new);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3983) setclid->se_clientid.cl_boot = new->cl_clientid.cl_boot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3984) setclid->se_clientid.cl_id = new->cl_clientid.cl_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3985) memcpy(setclid->se_confirm.data, new->cl_confirm.data, sizeof(setclid->se_confirm.data));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3986) new = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3987) status = nfs_ok;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3988) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3989) spin_unlock(&nn->client_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3990) if (new)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3991) free_client(new);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3992) if (unconf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3993) expire_client(unconf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3994) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3995) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3996)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3997)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3998) __be32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3999) nfsd4_setclientid_confirm(struct svc_rqst *rqstp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4000) struct nfsd4_compound_state *cstate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4001) union nfsd4_op_u *u)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4002) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4003) struct nfsd4_setclientid_confirm *setclientid_confirm =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4004) &u->setclientid_confirm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4005) struct nfs4_client *conf, *unconf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4006) struct nfs4_client *old = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4007) nfs4_verifier confirm = setclientid_confirm->sc_confirm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4008) clientid_t * clid = &setclientid_confirm->sc_clientid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4009) __be32 status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4010) struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4011)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4012) if (STALE_CLIENTID(clid, nn))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4013) return nfserr_stale_clientid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4014)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4015) spin_lock(&nn->client_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4016) conf = find_confirmed_client(clid, false, nn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4017) unconf = find_unconfirmed_client(clid, false, nn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4018) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4019) * We try hard to give out unique clientid's, so if we get an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4020) * attempt to confirm the same clientid with a different cred,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4021) * the client may be buggy; this should never happen.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4022) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4023) * Nevertheless, RFC 7530 recommends INUSE for this case:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4024) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4025) status = nfserr_clid_inuse;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4026) if (unconf && !same_creds(&unconf->cl_cred, &rqstp->rq_cred))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4027) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4028) if (conf && !same_creds(&conf->cl_cred, &rqstp->rq_cred))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4029) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4030) /* cases below refer to rfc 3530 section 14.2.34: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4031) if (!unconf || !same_verf(&confirm, &unconf->cl_confirm)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4032) if (conf && same_verf(&confirm, &conf->cl_confirm)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4033) /* case 2: probable retransmit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4034) status = nfs_ok;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4035) } else /* case 4: client hasn't noticed we rebooted yet? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4036) status = nfserr_stale_clientid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4037) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4038) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4039) status = nfs_ok;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4040) if (conf) { /* case 1: callback update */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4041) old = unconf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4042) unhash_client_locked(old);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4043) nfsd4_change_callback(conf, &unconf->cl_cb_conn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4044) } else { /* case 3: normal case; new or rebooted client */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4045) old = find_confirmed_client_by_name(&unconf->cl_name, nn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4046) if (old) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4047) status = nfserr_clid_inuse;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4048) if (client_has_state(old)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4049) && !same_creds(&unconf->cl_cred,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4050) &old->cl_cred)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4051) old = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4052) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4053) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4054) status = mark_client_expired_locked(old);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4055) if (status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4056) old = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4057) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4058) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4059) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4060) move_to_confirmed(unconf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4061) conf = unconf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4062) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4063) get_client_locked(conf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4064) spin_unlock(&nn->client_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4065) nfsd4_probe_callback(conf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4066) spin_lock(&nn->client_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4067) put_client_renew_locked(conf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4068) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4069) spin_unlock(&nn->client_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4070) if (old)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4071) expire_client(old);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4072) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4073) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4074)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4075) static struct nfs4_file *nfsd4_alloc_file(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4076) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4077) return kmem_cache_alloc(file_slab, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4078) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4079)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4080) /* OPEN Share state helper functions */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4081) static void nfsd4_init_file(struct knfsd_fh *fh, unsigned int hashval,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4082) struct nfs4_file *fp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4083) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4084) lockdep_assert_held(&state_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4085)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4086) refcount_set(&fp->fi_ref, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4087) spin_lock_init(&fp->fi_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4088) INIT_LIST_HEAD(&fp->fi_stateids);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4089) INIT_LIST_HEAD(&fp->fi_delegations);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4090) INIT_LIST_HEAD(&fp->fi_clnt_odstate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4091) fh_copy_shallow(&fp->fi_fhandle, fh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4092) fp->fi_deleg_file = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4093) fp->fi_had_conflict = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4094) fp->fi_share_deny = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4095) memset(fp->fi_fds, 0, sizeof(fp->fi_fds));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4096) memset(fp->fi_access, 0, sizeof(fp->fi_access));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4097) #ifdef CONFIG_NFSD_PNFS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4098) INIT_LIST_HEAD(&fp->fi_lo_states);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4099) atomic_set(&fp->fi_lo_recalls, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4100) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4101) hlist_add_head_rcu(&fp->fi_hash, &file_hashtbl[hashval]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4102) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4103)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4104) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4105) nfsd4_free_slabs(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4106) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4107) kmem_cache_destroy(client_slab);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4108) kmem_cache_destroy(openowner_slab);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4109) kmem_cache_destroy(lockowner_slab);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4110) kmem_cache_destroy(file_slab);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4111) kmem_cache_destroy(stateid_slab);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4112) kmem_cache_destroy(deleg_slab);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4113) kmem_cache_destroy(odstate_slab);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4114) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4115)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4116) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4117) nfsd4_init_slabs(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4118) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4119) client_slab = kmem_cache_create("nfsd4_clients",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4120) sizeof(struct nfs4_client), 0, 0, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4121) if (client_slab == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4122) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4123) openowner_slab = kmem_cache_create("nfsd4_openowners",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4124) sizeof(struct nfs4_openowner), 0, 0, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4125) if (openowner_slab == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4126) goto out_free_client_slab;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4127) lockowner_slab = kmem_cache_create("nfsd4_lockowners",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4128) sizeof(struct nfs4_lockowner), 0, 0, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4129) if (lockowner_slab == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4130) goto out_free_openowner_slab;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4131) file_slab = kmem_cache_create("nfsd4_files",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4132) sizeof(struct nfs4_file), 0, 0, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4133) if (file_slab == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4134) goto out_free_lockowner_slab;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4135) stateid_slab = kmem_cache_create("nfsd4_stateids",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4136) sizeof(struct nfs4_ol_stateid), 0, 0, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4137) if (stateid_slab == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4138) goto out_free_file_slab;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4139) deleg_slab = kmem_cache_create("nfsd4_delegations",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4140) sizeof(struct nfs4_delegation), 0, 0, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4141) if (deleg_slab == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4142) goto out_free_stateid_slab;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4143) odstate_slab = kmem_cache_create("nfsd4_odstate",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4144) sizeof(struct nfs4_clnt_odstate), 0, 0, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4145) if (odstate_slab == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4146) goto out_free_deleg_slab;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4147) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4148)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4149) out_free_deleg_slab:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4150) kmem_cache_destroy(deleg_slab);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4151) out_free_stateid_slab:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4152) kmem_cache_destroy(stateid_slab);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4153) out_free_file_slab:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4154) kmem_cache_destroy(file_slab);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4155) out_free_lockowner_slab:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4156) kmem_cache_destroy(lockowner_slab);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4157) out_free_openowner_slab:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4158) kmem_cache_destroy(openowner_slab);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4159) out_free_client_slab:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4160) kmem_cache_destroy(client_slab);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4161) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4162) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4163) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4164)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4165) static void init_nfs4_replay(struct nfs4_replay *rp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4166) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4167) rp->rp_status = nfserr_serverfault;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4168) rp->rp_buflen = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4169) rp->rp_buf = rp->rp_ibuf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4170) mutex_init(&rp->rp_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4171) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4172)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4173) static void nfsd4_cstate_assign_replay(struct nfsd4_compound_state *cstate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4174) struct nfs4_stateowner *so)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4175) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4176) if (!nfsd4_has_session(cstate)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4177) mutex_lock(&so->so_replay.rp_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4178) cstate->replay_owner = nfs4_get_stateowner(so);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4179) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4180) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4181)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4182) void nfsd4_cstate_clear_replay(struct nfsd4_compound_state *cstate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4183) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4184) struct nfs4_stateowner *so = cstate->replay_owner;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4185)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4186) if (so != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4187) cstate->replay_owner = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4188) mutex_unlock(&so->so_replay.rp_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4189) nfs4_put_stateowner(so);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4190) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4191) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4192)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4193) static inline void *alloc_stateowner(struct kmem_cache *slab, struct xdr_netobj *owner, struct nfs4_client *clp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4194) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4195) struct nfs4_stateowner *sop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4196)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4197) sop = kmem_cache_alloc(slab, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4198) if (!sop)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4199) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4200)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4201) xdr_netobj_dup(&sop->so_owner, owner, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4202) if (!sop->so_owner.data) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4203) kmem_cache_free(slab, sop);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4204) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4205) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4206)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4207) INIT_LIST_HEAD(&sop->so_stateids);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4208) sop->so_client = clp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4209) init_nfs4_replay(&sop->so_replay);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4210) atomic_set(&sop->so_count, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4211) return sop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4212) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4213)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4214) static void hash_openowner(struct nfs4_openowner *oo, struct nfs4_client *clp, unsigned int strhashval)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4215) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4216) lockdep_assert_held(&clp->cl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4217)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4218) list_add(&oo->oo_owner.so_strhash,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4219) &clp->cl_ownerstr_hashtbl[strhashval]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4220) list_add(&oo->oo_perclient, &clp->cl_openowners);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4221) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4222)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4223) static void nfs4_unhash_openowner(struct nfs4_stateowner *so)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4224) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4225) unhash_openowner_locked(openowner(so));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4226) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4227)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4228) static void nfs4_free_openowner(struct nfs4_stateowner *so)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4229) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4230) struct nfs4_openowner *oo = openowner(so);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4231)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4232) kmem_cache_free(openowner_slab, oo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4233) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4234)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4235) static const struct nfs4_stateowner_operations openowner_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4236) .so_unhash = nfs4_unhash_openowner,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4237) .so_free = nfs4_free_openowner,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4238) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4239)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4240) static struct nfs4_ol_stateid *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4241) nfsd4_find_existing_open(struct nfs4_file *fp, struct nfsd4_open *open)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4242) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4243) struct nfs4_ol_stateid *local, *ret = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4244) struct nfs4_openowner *oo = open->op_openowner;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4245)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4246) lockdep_assert_held(&fp->fi_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4247)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4248) list_for_each_entry(local, &fp->fi_stateids, st_perfile) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4249) /* ignore lock owners */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4250) if (local->st_stateowner->so_is_open_owner == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4251) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4252) if (local->st_stateowner != &oo->oo_owner)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4253) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4254) if (local->st_stid.sc_type == NFS4_OPEN_STID) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4255) ret = local;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4256) refcount_inc(&ret->st_stid.sc_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4257) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4258) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4259) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4260) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4261) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4262)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4263) static __be32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4264) nfsd4_verify_open_stid(struct nfs4_stid *s)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4265) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4266) __be32 ret = nfs_ok;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4267)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4268) switch (s->sc_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4269) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4270) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4271) case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4272) case NFS4_CLOSED_STID:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4273) case NFS4_CLOSED_DELEG_STID:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4274) ret = nfserr_bad_stateid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4275) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4276) case NFS4_REVOKED_DELEG_STID:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4277) ret = nfserr_deleg_revoked;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4278) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4279) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4280) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4281)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4282) /* Lock the stateid st_mutex, and deal with races with CLOSE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4283) static __be32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4284) nfsd4_lock_ol_stateid(struct nfs4_ol_stateid *stp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4285) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4286) __be32 ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4287)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4288) mutex_lock_nested(&stp->st_mutex, LOCK_STATEID_MUTEX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4289) ret = nfsd4_verify_open_stid(&stp->st_stid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4290) if (ret != nfs_ok)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4291) mutex_unlock(&stp->st_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4292) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4293) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4294)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4295) static struct nfs4_ol_stateid *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4296) nfsd4_find_and_lock_existing_open(struct nfs4_file *fp, struct nfsd4_open *open)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4297) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4298) struct nfs4_ol_stateid *stp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4299) for (;;) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4300) spin_lock(&fp->fi_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4301) stp = nfsd4_find_existing_open(fp, open);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4302) spin_unlock(&fp->fi_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4303) if (!stp || nfsd4_lock_ol_stateid(stp) == nfs_ok)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4304) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4305) nfs4_put_stid(&stp->st_stid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4306) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4307) return stp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4308) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4309)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4310) static struct nfs4_openowner *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4311) alloc_init_open_stateowner(unsigned int strhashval, struct nfsd4_open *open,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4312) struct nfsd4_compound_state *cstate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4313) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4314) struct nfs4_client *clp = cstate->clp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4315) struct nfs4_openowner *oo, *ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4316)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4317) oo = alloc_stateowner(openowner_slab, &open->op_owner, clp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4318) if (!oo)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4319) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4320) oo->oo_owner.so_ops = &openowner_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4321) oo->oo_owner.so_is_open_owner = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4322) oo->oo_owner.so_seqid = open->op_seqid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4323) oo->oo_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4324) if (nfsd4_has_session(cstate))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4325) oo->oo_flags |= NFS4_OO_CONFIRMED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4326) oo->oo_time = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4327) oo->oo_last_closed_stid = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4328) INIT_LIST_HEAD(&oo->oo_close_lru);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4329) spin_lock(&clp->cl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4330) ret = find_openstateowner_str_locked(strhashval, open, clp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4331) if (ret == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4332) hash_openowner(oo, clp, strhashval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4333) ret = oo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4334) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4335) nfs4_free_stateowner(&oo->oo_owner);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4336)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4337) spin_unlock(&clp->cl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4338) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4339) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4340)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4341) static struct nfs4_ol_stateid *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4342) init_open_stateid(struct nfs4_file *fp, struct nfsd4_open *open)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4343) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4344)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4345) struct nfs4_openowner *oo = open->op_openowner;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4346) struct nfs4_ol_stateid *retstp = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4347) struct nfs4_ol_stateid *stp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4348)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4349) stp = open->op_stp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4350) /* We are moving these outside of the spinlocks to avoid the warnings */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4351) mutex_init(&stp->st_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4352) mutex_lock_nested(&stp->st_mutex, OPEN_STATEID_MUTEX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4353)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4354) retry:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4355) spin_lock(&oo->oo_owner.so_client->cl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4356) spin_lock(&fp->fi_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4357)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4358) retstp = nfsd4_find_existing_open(fp, open);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4359) if (retstp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4360) goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4361)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4362) open->op_stp = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4363) refcount_inc(&stp->st_stid.sc_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4364) stp->st_stid.sc_type = NFS4_OPEN_STID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4365) INIT_LIST_HEAD(&stp->st_locks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4366) stp->st_stateowner = nfs4_get_stateowner(&oo->oo_owner);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4367) get_nfs4_file(fp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4368) stp->st_stid.sc_file = fp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4369) stp->st_access_bmap = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4370) stp->st_deny_bmap = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4371) stp->st_openstp = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4372) list_add(&stp->st_perstateowner, &oo->oo_owner.so_stateids);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4373) list_add(&stp->st_perfile, &fp->fi_stateids);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4374)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4375) out_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4376) spin_unlock(&fp->fi_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4377) spin_unlock(&oo->oo_owner.so_client->cl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4378) if (retstp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4379) /* Handle races with CLOSE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4380) if (nfsd4_lock_ol_stateid(retstp) != nfs_ok) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4381) nfs4_put_stid(&retstp->st_stid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4382) goto retry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4383) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4384) /* To keep mutex tracking happy */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4385) mutex_unlock(&stp->st_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4386) stp = retstp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4387) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4388) return stp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4389) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4390)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4391) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4392) * In the 4.0 case we need to keep the owners around a little while to handle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4393) * CLOSE replay. We still do need to release any file access that is held by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4394) * them before returning however.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4395) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4396) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4397) move_to_close_lru(struct nfs4_ol_stateid *s, struct net *net)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4398) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4399) struct nfs4_ol_stateid *last;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4400) struct nfs4_openowner *oo = openowner(s->st_stateowner);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4401) struct nfsd_net *nn = net_generic(s->st_stid.sc_client->net,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4402) nfsd_net_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4403)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4404) dprintk("NFSD: move_to_close_lru nfs4_openowner %p\n", oo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4405)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4406) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4407) * We know that we hold one reference via nfsd4_close, and another
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4408) * "persistent" reference for the client. If the refcount is higher
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4409) * than 2, then there are still calls in progress that are using this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4410) * stateid. We can't put the sc_file reference until they are finished.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4411) * Wait for the refcount to drop to 2. Since it has been unhashed,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4412) * there should be no danger of the refcount going back up again at
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4413) * this point.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4414) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4415) wait_event(close_wq, refcount_read(&s->st_stid.sc_count) == 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4416)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4417) release_all_access(s);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4418) if (s->st_stid.sc_file) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4419) put_nfs4_file(s->st_stid.sc_file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4420) s->st_stid.sc_file = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4421) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4422)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4423) spin_lock(&nn->client_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4424) last = oo->oo_last_closed_stid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4425) oo->oo_last_closed_stid = s;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4426) list_move_tail(&oo->oo_close_lru, &nn->close_lru);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4427) oo->oo_time = ktime_get_boottime_seconds();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4428) spin_unlock(&nn->client_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4429) if (last)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4430) nfs4_put_stid(&last->st_stid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4431) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4432)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4433) /* search file_hashtbl[] for file */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4434) static struct nfs4_file *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4435) find_file_locked(struct knfsd_fh *fh, unsigned int hashval)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4436) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4437) struct nfs4_file *fp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4438)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4439) hlist_for_each_entry_rcu(fp, &file_hashtbl[hashval], fi_hash,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4440) lockdep_is_held(&state_lock)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4441) if (fh_match(&fp->fi_fhandle, fh)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4442) if (refcount_inc_not_zero(&fp->fi_ref))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4443) return fp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4444) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4445) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4446) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4447) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4448)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4449) struct nfs4_file *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4450) find_file(struct knfsd_fh *fh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4451) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4452) struct nfs4_file *fp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4453) unsigned int hashval = file_hashval(fh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4454)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4455) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4456) fp = find_file_locked(fh, hashval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4457) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4458) return fp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4459) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4460)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4461) static struct nfs4_file *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4462) find_or_add_file(struct nfs4_file *new, struct knfsd_fh *fh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4463) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4464) struct nfs4_file *fp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4465) unsigned int hashval = file_hashval(fh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4466)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4467) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4468) fp = find_file_locked(fh, hashval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4469) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4470) if (fp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4471) return fp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4472)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4473) spin_lock(&state_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4474) fp = find_file_locked(fh, hashval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4475) if (likely(fp == NULL)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4476) nfsd4_init_file(fh, hashval, new);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4477) fp = new;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4478) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4479) spin_unlock(&state_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4480)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4481) return fp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4482) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4483)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4484) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4485) * Called to check deny when READ with all zero stateid or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4486) * WRITE with all zero or all one stateid
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4487) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4488) static __be32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4489) nfs4_share_conflict(struct svc_fh *current_fh, unsigned int deny_type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4490) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4491) struct nfs4_file *fp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4492) __be32 ret = nfs_ok;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4493)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4494) fp = find_file(¤t_fh->fh_handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4495) if (!fp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4496) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4497) /* Check for conflicting share reservations */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4498) spin_lock(&fp->fi_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4499) if (fp->fi_share_deny & deny_type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4500) ret = nfserr_locked;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4501) spin_unlock(&fp->fi_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4502) put_nfs4_file(fp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4503) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4504) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4505)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4506) static void nfsd4_cb_recall_prepare(struct nfsd4_callback *cb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4507) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4508) struct nfs4_delegation *dp = cb_to_delegation(cb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4509) struct nfsd_net *nn = net_generic(dp->dl_stid.sc_client->net,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4510) nfsd_net_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4511)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4512) block_delegations(&dp->dl_stid.sc_file->fi_fhandle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4513)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4514) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4515) * We can't do this in nfsd_break_deleg_cb because it is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4516) * already holding inode->i_lock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4517) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4518) * If the dl_time != 0, then we know that it has already been
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4519) * queued for a lease break. Don't queue it again.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4520) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4521) spin_lock(&state_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4522) if (delegation_hashed(dp) && dp->dl_time == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4523) dp->dl_time = ktime_get_boottime_seconds();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4524) list_add_tail(&dp->dl_recall_lru, &nn->del_recall_lru);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4525) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4526) spin_unlock(&state_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4527) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4528)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4529) static int nfsd4_cb_recall_done(struct nfsd4_callback *cb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4530) struct rpc_task *task)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4531) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4532) struct nfs4_delegation *dp = cb_to_delegation(cb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4533)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4534) if (dp->dl_stid.sc_type == NFS4_CLOSED_DELEG_STID ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4535) dp->dl_stid.sc_type == NFS4_REVOKED_DELEG_STID)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4536) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4537)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4538) switch (task->tk_status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4539) case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4540) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4541) case -NFS4ERR_DELAY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4542) rpc_delay(task, 2 * HZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4543) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4544) case -EBADHANDLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4545) case -NFS4ERR_BAD_STATEID:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4546) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4547) * Race: client probably got cb_recall before open reply
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4548) * granting delegation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4549) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4550) if (dp->dl_retries--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4551) rpc_delay(task, 2 * HZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4552) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4553) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4554) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4555) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4556) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4557) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4558) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4559)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4560) static void nfsd4_cb_recall_release(struct nfsd4_callback *cb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4561) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4562) struct nfs4_delegation *dp = cb_to_delegation(cb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4563)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4564) nfs4_put_stid(&dp->dl_stid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4565) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4566)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4567) static const struct nfsd4_callback_ops nfsd4_cb_recall_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4568) .prepare = nfsd4_cb_recall_prepare,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4569) .done = nfsd4_cb_recall_done,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4570) .release = nfsd4_cb_recall_release,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4571) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4572)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4573) static void nfsd_break_one_deleg(struct nfs4_delegation *dp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4574) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4575) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4576) * We're assuming the state code never drops its reference
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4577) * without first removing the lease. Since we're in this lease
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4578) * callback (and since the lease code is serialized by the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4579) * i_lock) we know the server hasn't removed the lease yet, and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4580) * we know it's safe to take a reference.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4581) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4582) refcount_inc(&dp->dl_stid.sc_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4583) nfsd4_run_cb(&dp->dl_recall);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4584) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4585)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4586) /* Called from break_lease() with i_lock held. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4587) static bool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4588) nfsd_break_deleg_cb(struct file_lock *fl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4589) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4590) bool ret = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4591) struct nfs4_delegation *dp = (struct nfs4_delegation *)fl->fl_owner;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4592) struct nfs4_file *fp = dp->dl_stid.sc_file;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4593)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4594) trace_nfsd_deleg_break(&dp->dl_stid.sc_stateid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4595)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4596) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4597) * We don't want the locks code to timeout the lease for us;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4598) * we'll remove it ourself if a delegation isn't returned
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4599) * in time:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4600) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4601) fl->fl_break_time = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4602)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4603) spin_lock(&fp->fi_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4604) fp->fi_had_conflict = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4605) nfsd_break_one_deleg(dp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4606) spin_unlock(&fp->fi_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4607) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4608) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4609)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4610) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4611) * nfsd_breaker_owns_lease - Check if lease conflict was resolved
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4612) * @fl: Lock state to check
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4613) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4614) * Return values:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4615) * %true: Lease conflict was resolved
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4616) * %false: Lease conflict was not resolved.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4617) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4618) static bool nfsd_breaker_owns_lease(struct file_lock *fl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4619) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4620) struct nfs4_delegation *dl = fl->fl_owner;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4621) struct svc_rqst *rqst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4622) struct nfs4_client *clp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4623)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4624) if (!i_am_nfsd())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4625) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4626) rqst = kthread_data(current);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4627) /* Note rq_prog == NFS_ACL_PROGRAM is also possible: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4628) if (rqst->rq_prog != NFS_PROGRAM || rqst->rq_vers < 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4629) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4630) clp = *(rqst->rq_lease_breaker);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4631) return dl->dl_stid.sc_client == clp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4632) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4633)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4634) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4635) nfsd_change_deleg_cb(struct file_lock *onlist, int arg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4636) struct list_head *dispose)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4637) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4638) if (arg & F_UNLCK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4639) return lease_modify(onlist, arg, dispose);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4640) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4641) return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4642) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4643)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4644) static const struct lock_manager_operations nfsd_lease_mng_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4645) .lm_breaker_owns_lease = nfsd_breaker_owns_lease,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4646) .lm_break = nfsd_break_deleg_cb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4647) .lm_change = nfsd_change_deleg_cb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4648) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4649)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4650) static __be32 nfsd4_check_seqid(struct nfsd4_compound_state *cstate, struct nfs4_stateowner *so, u32 seqid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4651) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4652) if (nfsd4_has_session(cstate))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4653) return nfs_ok;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4654) if (seqid == so->so_seqid - 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4655) return nfserr_replay_me;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4656) if (seqid == so->so_seqid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4657) return nfs_ok;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4658) return nfserr_bad_seqid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4659) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4660)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4661) static __be32 lookup_clientid(clientid_t *clid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4662) struct nfsd4_compound_state *cstate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4663) struct nfsd_net *nn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4664) bool sessions)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4665) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4666) struct nfs4_client *found;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4667)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4668) if (cstate->clp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4669) found = cstate->clp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4670) if (!same_clid(&found->cl_clientid, clid))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4671) return nfserr_stale_clientid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4672) return nfs_ok;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4673) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4674)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4675) if (STALE_CLIENTID(clid, nn))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4676) return nfserr_stale_clientid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4677)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4678) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4679) * For v4.1+ we get the client in the SEQUENCE op. If we don't have one
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4680) * cached already then we know this is for is for v4.0 and "sessions"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4681) * will be false.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4682) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4683) WARN_ON_ONCE(cstate->session);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4684) spin_lock(&nn->client_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4685) found = find_confirmed_client(clid, sessions, nn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4686) if (!found) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4687) spin_unlock(&nn->client_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4688) return nfserr_expired;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4689) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4690) atomic_inc(&found->cl_rpc_users);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4691) spin_unlock(&nn->client_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4692)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4693) /* Cache the nfs4_client in cstate! */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4694) cstate->clp = found;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4695) return nfs_ok;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4696) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4697)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4698) __be32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4699) nfsd4_process_open1(struct nfsd4_compound_state *cstate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4700) struct nfsd4_open *open, struct nfsd_net *nn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4701) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4702) clientid_t *clientid = &open->op_clientid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4703) struct nfs4_client *clp = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4704) unsigned int strhashval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4705) struct nfs4_openowner *oo = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4706) __be32 status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4707)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4708) if (STALE_CLIENTID(&open->op_clientid, nn))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4709) return nfserr_stale_clientid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4710) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4711) * In case we need it later, after we've already created the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4712) * file and don't want to risk a further failure:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4713) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4714) open->op_file = nfsd4_alloc_file();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4715) if (open->op_file == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4716) return nfserr_jukebox;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4717)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4718) status = lookup_clientid(clientid, cstate, nn, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4719) if (status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4720) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4721) clp = cstate->clp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4722)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4723) strhashval = ownerstr_hashval(&open->op_owner);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4724) oo = find_openstateowner_str(strhashval, open, clp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4725) open->op_openowner = oo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4726) if (!oo) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4727) goto new_owner;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4728) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4729) if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4730) /* Replace unconfirmed owners without checking for replay. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4731) release_openowner(oo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4732) open->op_openowner = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4733) goto new_owner;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4734) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4735) status = nfsd4_check_seqid(cstate, &oo->oo_owner, open->op_seqid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4736) if (status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4737) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4738) goto alloc_stateid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4739) new_owner:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4740) oo = alloc_init_open_stateowner(strhashval, open, cstate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4741) if (oo == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4742) return nfserr_jukebox;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4743) open->op_openowner = oo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4744) alloc_stateid:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4745) open->op_stp = nfs4_alloc_open_stateid(clp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4746) if (!open->op_stp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4747) return nfserr_jukebox;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4748)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4749) if (nfsd4_has_session(cstate) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4750) (cstate->current_fh.fh_export->ex_flags & NFSEXP_PNFS)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4751) open->op_odstate = alloc_clnt_odstate(clp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4752) if (!open->op_odstate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4753) return nfserr_jukebox;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4754) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4755)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4756) return nfs_ok;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4757) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4758)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4759) static inline __be32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4760) nfs4_check_delegmode(struct nfs4_delegation *dp, int flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4761) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4762) if ((flags & WR_STATE) && (dp->dl_type == NFS4_OPEN_DELEGATE_READ))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4763) return nfserr_openmode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4764) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4765) return nfs_ok;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4766) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4767)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4768) static int share_access_to_flags(u32 share_access)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4769) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4770) return share_access == NFS4_SHARE_ACCESS_READ ? RD_STATE : WR_STATE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4771) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4772)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4773) static struct nfs4_delegation *find_deleg_stateid(struct nfs4_client *cl, stateid_t *s)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4774) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4775) struct nfs4_stid *ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4776)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4777) ret = find_stateid_by_type(cl, s,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4778) NFS4_DELEG_STID|NFS4_REVOKED_DELEG_STID);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4779) if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4780) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4781) return delegstateid(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4782) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4783)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4784) static bool nfsd4_is_deleg_cur(struct nfsd4_open *open)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4785) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4786) return open->op_claim_type == NFS4_OPEN_CLAIM_DELEGATE_CUR ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4787) open->op_claim_type == NFS4_OPEN_CLAIM_DELEG_CUR_FH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4788) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4789)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4790) static __be32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4791) nfs4_check_deleg(struct nfs4_client *cl, struct nfsd4_open *open,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4792) struct nfs4_delegation **dp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4793) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4794) int flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4795) __be32 status = nfserr_bad_stateid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4796) struct nfs4_delegation *deleg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4797)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4798) deleg = find_deleg_stateid(cl, &open->op_delegate_stateid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4799) if (deleg == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4800) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4801) if (deleg->dl_stid.sc_type == NFS4_REVOKED_DELEG_STID) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4802) nfs4_put_stid(&deleg->dl_stid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4803) if (cl->cl_minorversion)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4804) status = nfserr_deleg_revoked;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4805) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4806) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4807) flags = share_access_to_flags(open->op_share_access);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4808) status = nfs4_check_delegmode(deleg, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4809) if (status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4810) nfs4_put_stid(&deleg->dl_stid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4811) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4812) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4813) *dp = deleg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4814) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4815) if (!nfsd4_is_deleg_cur(open))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4816) return nfs_ok;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4817) if (status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4818) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4819) open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4820) return nfs_ok;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4821) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4822)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4823) static inline int nfs4_access_to_access(u32 nfs4_access)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4824) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4825) int flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4826)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4827) if (nfs4_access & NFS4_SHARE_ACCESS_READ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4828) flags |= NFSD_MAY_READ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4829) if (nfs4_access & NFS4_SHARE_ACCESS_WRITE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4830) flags |= NFSD_MAY_WRITE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4831) return flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4832) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4833)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4834) static inline __be32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4835) nfsd4_truncate(struct svc_rqst *rqstp, struct svc_fh *fh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4836) struct nfsd4_open *open)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4837) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4838) struct iattr iattr = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4839) .ia_valid = ATTR_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4840) .ia_size = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4841) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4842) if (!open->op_truncate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4843) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4844) if (!(open->op_share_access & NFS4_SHARE_ACCESS_WRITE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4845) return nfserr_inval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4846) return nfsd_setattr(rqstp, fh, &iattr, 0, (time64_t)0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4847) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4848)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4849) static __be32 nfs4_get_vfs_file(struct svc_rqst *rqstp, struct nfs4_file *fp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4850) struct svc_fh *cur_fh, struct nfs4_ol_stateid *stp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4851) struct nfsd4_open *open)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4852) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4853) struct nfsd_file *nf = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4854) __be32 status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4855) int oflag = nfs4_access_to_omode(open->op_share_access);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4856) int access = nfs4_access_to_access(open->op_share_access);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4857) unsigned char old_access_bmap, old_deny_bmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4858)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4859) spin_lock(&fp->fi_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4860)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4861) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4862) * Are we trying to set a deny mode that would conflict with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4863) * current access?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4864) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4865) status = nfs4_file_check_deny(fp, open->op_share_deny);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4866) if (status != nfs_ok) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4867) spin_unlock(&fp->fi_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4868) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4869) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4870)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4871) /* set access to the file */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4872) status = nfs4_file_get_access(fp, open->op_share_access);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4873) if (status != nfs_ok) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4874) spin_unlock(&fp->fi_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4875) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4876) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4877)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4878) /* Set access bits in stateid */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4879) old_access_bmap = stp->st_access_bmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4880) set_access(open->op_share_access, stp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4881)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4882) /* Set new deny mask */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4883) old_deny_bmap = stp->st_deny_bmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4884) set_deny(open->op_share_deny, stp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4885) fp->fi_share_deny |= (open->op_share_deny & NFS4_SHARE_DENY_BOTH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4886)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4887) if (!fp->fi_fds[oflag]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4888) spin_unlock(&fp->fi_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4889) status = nfsd_file_acquire(rqstp, cur_fh, access, &nf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4890) if (status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4891) goto out_put_access;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4892) spin_lock(&fp->fi_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4893) if (!fp->fi_fds[oflag]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4894) fp->fi_fds[oflag] = nf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4895) nf = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4896) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4897) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4898) spin_unlock(&fp->fi_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4899) if (nf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4900) nfsd_file_put(nf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4901)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4902) status = nfserrno(nfsd_open_break_lease(cur_fh->fh_dentry->d_inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4903) access));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4904) if (status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4905) goto out_put_access;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4906)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4907) status = nfsd4_truncate(rqstp, cur_fh, open);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4908) if (status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4909) goto out_put_access;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4910) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4911) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4912) out_put_access:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4913) stp->st_access_bmap = old_access_bmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4914) nfs4_file_put_access(fp, open->op_share_access);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4915) reset_union_bmap_deny(bmap_to_share_mode(old_deny_bmap), stp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4916) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4917) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4918)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4919) static __be32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4920) nfs4_upgrade_open(struct svc_rqst *rqstp, struct nfs4_file *fp, struct svc_fh *cur_fh, struct nfs4_ol_stateid *stp, struct nfsd4_open *open)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4921) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4922) __be32 status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4923) unsigned char old_deny_bmap = stp->st_deny_bmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4924)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4925) if (!test_access(open->op_share_access, stp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4926) return nfs4_get_vfs_file(rqstp, fp, cur_fh, stp, open);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4927)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4928) /* test and set deny mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4929) spin_lock(&fp->fi_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4930) status = nfs4_file_check_deny(fp, open->op_share_deny);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4931) if (status == nfs_ok) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4932) set_deny(open->op_share_deny, stp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4933) fp->fi_share_deny |=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4934) (open->op_share_deny & NFS4_SHARE_DENY_BOTH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4935) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4936) spin_unlock(&fp->fi_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4937)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4938) if (status != nfs_ok)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4939) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4940)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4941) status = nfsd4_truncate(rqstp, cur_fh, open);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4942) if (status != nfs_ok)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4943) reset_union_bmap_deny(old_deny_bmap, stp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4944) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4945) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4946)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4947) /* Should we give out recallable state?: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4948) static bool nfsd4_cb_channel_good(struct nfs4_client *clp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4949) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4950) if (clp->cl_cb_state == NFSD4_CB_UP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4951) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4952) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4953) * In the sessions case, since we don't have to establish a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4954) * separate connection for callbacks, we assume it's OK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4955) * until we hear otherwise:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4956) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4957) return clp->cl_minorversion && clp->cl_cb_state == NFSD4_CB_UNKNOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4958) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4959)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4960) static struct file_lock *nfs4_alloc_init_lease(struct nfs4_delegation *dp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4961) int flag)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4962) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4963) struct file_lock *fl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4964)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4965) fl = locks_alloc_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4966) if (!fl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4967) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4968) fl->fl_lmops = &nfsd_lease_mng_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4969) fl->fl_flags = FL_DELEG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4970) fl->fl_type = flag == NFS4_OPEN_DELEGATE_READ? F_RDLCK: F_WRLCK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4971) fl->fl_end = OFFSET_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4972) fl->fl_owner = (fl_owner_t)dp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4973) fl->fl_pid = current->tgid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4974) fl->fl_file = dp->dl_stid.sc_file->fi_deleg_file->nf_file;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4975) return fl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4976) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4977)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4978) static struct nfs4_delegation *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4979) nfs4_set_delegation(struct nfs4_client *clp, struct svc_fh *fh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4980) struct nfs4_file *fp, struct nfs4_clnt_odstate *odstate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4981) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4982) int status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4983) struct nfs4_delegation *dp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4984) struct nfsd_file *nf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4985) struct file_lock *fl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4986)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4987) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4988) * The fi_had_conflict and nfs_get_existing_delegation checks
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4989) * here are just optimizations; we'll need to recheck them at
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4990) * the end:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4991) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4992) if (fp->fi_had_conflict)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4993) return ERR_PTR(-EAGAIN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4994)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4995) nf = find_readable_file(fp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4996) if (!nf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4997) /* We should always have a readable file here */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4998) WARN_ON_ONCE(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4999) return ERR_PTR(-EBADF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5000) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5001) spin_lock(&state_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5002) spin_lock(&fp->fi_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5003) if (nfs4_delegation_exists(clp, fp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5004) status = -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5005) else if (!fp->fi_deleg_file) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5006) fp->fi_deleg_file = nf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5007) /* increment early to prevent fi_deleg_file from being
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5008) * cleared */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5009) fp->fi_delegees = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5010) nf = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5011) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5012) fp->fi_delegees++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5013) spin_unlock(&fp->fi_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5014) spin_unlock(&state_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5015) if (nf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5016) nfsd_file_put(nf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5017) if (status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5018) return ERR_PTR(status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5019)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5020) status = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5021) dp = alloc_init_deleg(clp, fp, fh, odstate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5022) if (!dp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5023) goto out_delegees;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5024)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5025) fl = nfs4_alloc_init_lease(dp, NFS4_OPEN_DELEGATE_READ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5026) if (!fl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5027) goto out_clnt_odstate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5028)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5029) status = vfs_setlease(fp->fi_deleg_file->nf_file, fl->fl_type, &fl, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5030) if (fl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5031) locks_free_lock(fl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5032) if (status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5033) goto out_clnt_odstate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5034)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5035) spin_lock(&state_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5036) spin_lock(&fp->fi_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5037) if (fp->fi_had_conflict)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5038) status = -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5039) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5040) status = hash_delegation_locked(dp, fp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5041) spin_unlock(&fp->fi_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5042) spin_unlock(&state_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5043)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5044) if (status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5045) goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5046)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5047) return dp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5048) out_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5049) vfs_setlease(fp->fi_deleg_file->nf_file, F_UNLCK, NULL, (void **)&dp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5050) out_clnt_odstate:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5051) put_clnt_odstate(dp->dl_clnt_odstate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5052) nfs4_put_stid(&dp->dl_stid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5053) out_delegees:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5054) put_deleg_file(fp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5055) return ERR_PTR(status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5056) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5057)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5058) static void nfsd4_open_deleg_none_ext(struct nfsd4_open *open, int status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5059) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5060) open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5061) if (status == -EAGAIN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5062) open->op_why_no_deleg = WND4_CONTENTION;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5063) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5064) open->op_why_no_deleg = WND4_RESOURCE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5065) switch (open->op_deleg_want) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5066) case NFS4_SHARE_WANT_READ_DELEG:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5067) case NFS4_SHARE_WANT_WRITE_DELEG:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5068) case NFS4_SHARE_WANT_ANY_DELEG:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5069) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5070) case NFS4_SHARE_WANT_CANCEL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5071) open->op_why_no_deleg = WND4_CANCELLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5072) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5073) case NFS4_SHARE_WANT_NO_DELEG:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5074) WARN_ON_ONCE(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5075) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5076) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5077) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5078)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5079) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5080) * Attempt to hand out a delegation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5081) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5082) * Note we don't support write delegations, and won't until the vfs has
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5083) * proper support for them.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5084) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5085) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5086) nfs4_open_delegation(struct svc_fh *fh, struct nfsd4_open *open,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5087) struct nfs4_ol_stateid *stp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5088) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5089) struct nfs4_delegation *dp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5090) struct nfs4_openowner *oo = openowner(stp->st_stateowner);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5091) struct nfs4_client *clp = stp->st_stid.sc_client;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5092) int cb_up;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5093) int status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5094)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5095) cb_up = nfsd4_cb_channel_good(oo->oo_owner.so_client);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5096) open->op_recall = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5097) switch (open->op_claim_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5098) case NFS4_OPEN_CLAIM_PREVIOUS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5099) if (!cb_up)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5100) open->op_recall = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5101) if (open->op_delegate_type != NFS4_OPEN_DELEGATE_READ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5102) goto out_no_deleg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5103) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5104) case NFS4_OPEN_CLAIM_NULL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5105) case NFS4_OPEN_CLAIM_FH:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5106) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5107) * Let's not give out any delegations till everyone's
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5108) * had the chance to reclaim theirs, *and* until
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5109) * NLM locks have all been reclaimed:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5110) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5111) if (locks_in_grace(clp->net))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5112) goto out_no_deleg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5113) if (!cb_up || !(oo->oo_flags & NFS4_OO_CONFIRMED))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5114) goto out_no_deleg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5115) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5116) * Also, if the file was opened for write or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5117) * create, there's a good chance the client's
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5118) * about to write to it, resulting in an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5119) * immediate recall (since we don't support
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5120) * write delegations):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5121) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5122) if (open->op_share_access & NFS4_SHARE_ACCESS_WRITE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5123) goto out_no_deleg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5124) if (open->op_create == NFS4_OPEN_CREATE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5125) goto out_no_deleg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5126) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5127) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5128) goto out_no_deleg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5129) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5130) dp = nfs4_set_delegation(clp, fh, stp->st_stid.sc_file, stp->st_clnt_odstate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5131) if (IS_ERR(dp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5132) goto out_no_deleg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5133)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5134) memcpy(&open->op_delegate_stateid, &dp->dl_stid.sc_stateid, sizeof(dp->dl_stid.sc_stateid));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5135)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5136) trace_nfsd_deleg_read(&dp->dl_stid.sc_stateid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5137) open->op_delegate_type = NFS4_OPEN_DELEGATE_READ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5138) nfs4_put_stid(&dp->dl_stid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5139) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5140) out_no_deleg:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5141) open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5142) if (open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5143) open->op_delegate_type != NFS4_OPEN_DELEGATE_NONE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5144) dprintk("NFSD: WARNING: refusing delegation reclaim\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5145) open->op_recall = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5146) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5147)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5148) /* 4.1 client asking for a delegation? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5149) if (open->op_deleg_want)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5150) nfsd4_open_deleg_none_ext(open, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5151) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5152) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5153)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5154) static void nfsd4_deleg_xgrade_none_ext(struct nfsd4_open *open,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5155) struct nfs4_delegation *dp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5156) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5157) if (open->op_deleg_want == NFS4_SHARE_WANT_READ_DELEG &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5158) dp->dl_type == NFS4_OPEN_DELEGATE_WRITE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5159) open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5160) open->op_why_no_deleg = WND4_NOT_SUPP_DOWNGRADE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5161) } else if (open->op_deleg_want == NFS4_SHARE_WANT_WRITE_DELEG &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5162) dp->dl_type == NFS4_OPEN_DELEGATE_WRITE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5163) open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5164) open->op_why_no_deleg = WND4_NOT_SUPP_UPGRADE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5165) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5166) /* Otherwise the client must be confused wanting a delegation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5167) * it already has, therefore we don't return
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5168) * NFS4_OPEN_DELEGATE_NONE_EXT and reason.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5169) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5170) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5171)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5172) __be32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5173) nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_open *open)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5174) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5175) struct nfsd4_compoundres *resp = rqstp->rq_resp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5176) struct nfs4_client *cl = open->op_openowner->oo_owner.so_client;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5177) struct nfs4_file *fp = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5178) struct nfs4_ol_stateid *stp = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5179) struct nfs4_delegation *dp = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5180) __be32 status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5181) bool new_stp = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5182)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5183) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5184) * Lookup file; if found, lookup stateid and check open request,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5185) * and check for delegations in the process of being recalled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5186) * If not found, create the nfs4_file struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5187) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5188) fp = find_or_add_file(open->op_file, ¤t_fh->fh_handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5189) if (fp != open->op_file) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5190) status = nfs4_check_deleg(cl, open, &dp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5191) if (status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5192) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5193) stp = nfsd4_find_and_lock_existing_open(fp, open);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5194) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5195) open->op_file = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5196) status = nfserr_bad_stateid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5197) if (nfsd4_is_deleg_cur(open))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5198) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5199) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5200)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5201) if (!stp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5202) stp = init_open_stateid(fp, open);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5203) if (!open->op_stp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5204) new_stp = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5205) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5206)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5207) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5208) * OPEN the file, or upgrade an existing OPEN.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5209) * If truncate fails, the OPEN fails.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5210) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5211) * stp is already locked.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5212) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5213) if (!new_stp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5214) /* Stateid was found, this is an OPEN upgrade */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5215) status = nfs4_upgrade_open(rqstp, fp, current_fh, stp, open);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5216) if (status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5217) mutex_unlock(&stp->st_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5218) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5219) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5220) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5221) status = nfs4_get_vfs_file(rqstp, fp, current_fh, stp, open);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5222) if (status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5223) stp->st_stid.sc_type = NFS4_CLOSED_STID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5224) release_open_stateid(stp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5225) mutex_unlock(&stp->st_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5226) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5227) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5228)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5229) stp->st_clnt_odstate = find_or_hash_clnt_odstate(fp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5230) open->op_odstate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5231) if (stp->st_clnt_odstate == open->op_odstate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5232) open->op_odstate = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5233) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5234)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5235) nfs4_inc_and_copy_stateid(&open->op_stateid, &stp->st_stid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5236) mutex_unlock(&stp->st_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5237)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5238) if (nfsd4_has_session(&resp->cstate)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5239) if (open->op_deleg_want & NFS4_SHARE_WANT_NO_DELEG) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5240) open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5241) open->op_why_no_deleg = WND4_NOT_WANTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5242) goto nodeleg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5243) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5244) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5245)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5246) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5247) * Attempt to hand out a delegation. No error return, because the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5248) * OPEN succeeds even if we fail.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5249) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5250) nfs4_open_delegation(current_fh, open, stp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5251) nodeleg:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5252) status = nfs_ok;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5253) trace_nfsd_open(&stp->st_stid.sc_stateid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5254) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5255) /* 4.1 client trying to upgrade/downgrade delegation? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5256) if (open->op_delegate_type == NFS4_OPEN_DELEGATE_NONE && dp &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5257) open->op_deleg_want)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5258) nfsd4_deleg_xgrade_none_ext(open, dp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5259)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5260) if (fp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5261) put_nfs4_file(fp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5262) if (status == 0 && open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5263) open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5264) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5265) * To finish the open response, we just need to set the rflags.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5266) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5267) open->op_rflags = NFS4_OPEN_RESULT_LOCKTYPE_POSIX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5268) if (nfsd4_has_session(&resp->cstate))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5269) open->op_rflags |= NFS4_OPEN_RESULT_MAY_NOTIFY_LOCK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5270) else if (!(open->op_openowner->oo_flags & NFS4_OO_CONFIRMED))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5271) open->op_rflags |= NFS4_OPEN_RESULT_CONFIRM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5272)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5273) if (dp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5274) nfs4_put_stid(&dp->dl_stid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5275) if (stp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5276) nfs4_put_stid(&stp->st_stid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5277)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5278) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5279) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5280)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5281) void nfsd4_cleanup_open_state(struct nfsd4_compound_state *cstate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5282) struct nfsd4_open *open)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5283) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5284) if (open->op_openowner) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5285) struct nfs4_stateowner *so = &open->op_openowner->oo_owner;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5286)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5287) nfsd4_cstate_assign_replay(cstate, so);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5288) nfs4_put_stateowner(so);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5289) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5290) if (open->op_file)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5291) kmem_cache_free(file_slab, open->op_file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5292) if (open->op_stp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5293) nfs4_put_stid(&open->op_stp->st_stid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5294) if (open->op_odstate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5295) kmem_cache_free(odstate_slab, open->op_odstate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5296) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5297)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5298) __be32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5299) nfsd4_renew(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5300) union nfsd4_op_u *u)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5301) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5302) clientid_t *clid = &u->renew;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5303) struct nfs4_client *clp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5304) __be32 status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5305) struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5306)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5307) trace_nfsd_clid_renew(clid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5308) status = lookup_clientid(clid, cstate, nn, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5309) if (status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5310) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5311) clp = cstate->clp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5312) status = nfserr_cb_path_down;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5313) if (!list_empty(&clp->cl_delegations)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5314) && clp->cl_cb_state != NFSD4_CB_UP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5315) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5316) status = nfs_ok;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5317) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5318) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5319) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5320)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5321) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5322) nfsd4_end_grace(struct nfsd_net *nn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5323) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5324) /* do nothing if grace period already ended */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5325) if (nn->grace_ended)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5326) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5327)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5328) trace_nfsd_grace_complete(nn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5329) nn->grace_ended = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5330) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5331) * If the server goes down again right now, an NFSv4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5332) * client will still be allowed to reclaim after it comes back up,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5333) * even if it hasn't yet had a chance to reclaim state this time.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5334) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5335) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5336) nfsd4_record_grace_done(nn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5337) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5338) * At this point, NFSv4 clients can still reclaim. But if the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5339) * server crashes, any that have not yet reclaimed will be out
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5340) * of luck on the next boot.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5341) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5342) * (NFSv4.1+ clients are considered to have reclaimed once they
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5343) * call RECLAIM_COMPLETE. NFSv4.0 clients are considered to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5344) * have reclaimed after their first OPEN.)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5345) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5346) locks_end_grace(&nn->nfsd4_manager);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5347) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5348) * At this point, and once lockd and/or any other containers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5349) * exit their grace period, further reclaims will fail and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5350) * regular locking can resume.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5351) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5352) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5353)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5354) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5355) * If we've waited a lease period but there are still clients trying to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5356) * reclaim, wait a little longer to give them a chance to finish.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5357) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5358) static bool clients_still_reclaiming(struct nfsd_net *nn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5359) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5360) time64_t double_grace_period_end = nn->boot_time +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5361) 2 * nn->nfsd4_lease;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5362)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5363) if (nn->track_reclaim_completes &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5364) atomic_read(&nn->nr_reclaim_complete) ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5365) nn->reclaim_str_hashtbl_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5366) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5367) if (!nn->somebody_reclaimed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5368) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5369) nn->somebody_reclaimed = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5370) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5371) * If we've given them *two* lease times to reclaim, and they're
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5372) * still not done, give up:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5373) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5374) if (ktime_get_boottime_seconds() > double_grace_period_end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5375) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5376) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5377) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5378)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5379) static time64_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5380) nfs4_laundromat(struct nfsd_net *nn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5381) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5382) struct nfs4_client *clp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5383) struct nfs4_openowner *oo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5384) struct nfs4_delegation *dp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5385) struct nfs4_ol_stateid *stp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5386) struct nfsd4_blocked_lock *nbl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5387) struct list_head *pos, *next, reaplist;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5388) time64_t cutoff = ktime_get_boottime_seconds() - nn->nfsd4_lease;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5389) time64_t t, new_timeo = nn->nfsd4_lease;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5390) struct nfs4_cpntf_state *cps;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5391) copy_stateid_t *cps_t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5392) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5393)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5394) if (clients_still_reclaiming(nn)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5395) new_timeo = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5396) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5397) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5398) nfsd4_end_grace(nn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5399) INIT_LIST_HEAD(&reaplist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5400)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5401) spin_lock(&nn->s2s_cp_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5402) idr_for_each_entry(&nn->s2s_cp_stateids, cps_t, i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5403) cps = container_of(cps_t, struct nfs4_cpntf_state, cp_stateid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5404) if (cps->cp_stateid.sc_type == NFS4_COPYNOTIFY_STID &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5405) cps->cpntf_time < cutoff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5406) _free_cpntf_state_locked(nn, cps);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5407) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5408) spin_unlock(&nn->s2s_cp_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5409)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5410) spin_lock(&nn->client_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5411) list_for_each_safe(pos, next, &nn->client_lru) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5412) clp = list_entry(pos, struct nfs4_client, cl_lru);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5413) if (clp->cl_time > cutoff) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5414) t = clp->cl_time - cutoff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5415) new_timeo = min(new_timeo, t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5416) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5417) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5418) if (mark_client_expired_locked(clp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5419) trace_nfsd_clid_expired(&clp->cl_clientid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5420) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5421) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5422) list_add(&clp->cl_lru, &reaplist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5423) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5424) spin_unlock(&nn->client_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5425) list_for_each_safe(pos, next, &reaplist) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5426) clp = list_entry(pos, struct nfs4_client, cl_lru);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5427) trace_nfsd_clid_purged(&clp->cl_clientid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5428) list_del_init(&clp->cl_lru);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5429) expire_client(clp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5430) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5431) spin_lock(&state_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5432) list_for_each_safe(pos, next, &nn->del_recall_lru) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5433) dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5434) if (dp->dl_time > cutoff) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5435) t = dp->dl_time - cutoff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5436) new_timeo = min(new_timeo, t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5437) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5438) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5439) WARN_ON(!unhash_delegation_locked(dp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5440) list_add(&dp->dl_recall_lru, &reaplist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5441) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5442) spin_unlock(&state_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5443) while (!list_empty(&reaplist)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5444) dp = list_first_entry(&reaplist, struct nfs4_delegation,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5445) dl_recall_lru);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5446) list_del_init(&dp->dl_recall_lru);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5447) revoke_delegation(dp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5448) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5449)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5450) spin_lock(&nn->client_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5451) while (!list_empty(&nn->close_lru)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5452) oo = list_first_entry(&nn->close_lru, struct nfs4_openowner,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5453) oo_close_lru);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5454) if (oo->oo_time > cutoff) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5455) t = oo->oo_time - cutoff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5456) new_timeo = min(new_timeo, t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5457) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5458) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5459) list_del_init(&oo->oo_close_lru);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5460) stp = oo->oo_last_closed_stid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5461) oo->oo_last_closed_stid = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5462) spin_unlock(&nn->client_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5463) nfs4_put_stid(&stp->st_stid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5464) spin_lock(&nn->client_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5465) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5466) spin_unlock(&nn->client_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5467)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5468) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5469) * It's possible for a client to try and acquire an already held lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5470) * that is being held for a long time, and then lose interest in it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5471) * So, we clean out any un-revisited request after a lease period
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5472) * under the assumption that the client is no longer interested.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5473) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5474) * RFC5661, sec. 9.6 states that the client must not rely on getting
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5475) * notifications and must continue to poll for locks, even when the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5476) * server supports them. Thus this shouldn't lead to clients blocking
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5477) * indefinitely once the lock does become free.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5478) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5479) BUG_ON(!list_empty(&reaplist));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5480) spin_lock(&nn->blocked_locks_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5481) while (!list_empty(&nn->blocked_locks_lru)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5482) nbl = list_first_entry(&nn->blocked_locks_lru,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5483) struct nfsd4_blocked_lock, nbl_lru);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5484) if (nbl->nbl_time > cutoff) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5485) t = nbl->nbl_time - cutoff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5486) new_timeo = min(new_timeo, t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5487) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5488) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5489) list_move(&nbl->nbl_lru, &reaplist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5490) list_del_init(&nbl->nbl_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5491) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5492) spin_unlock(&nn->blocked_locks_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5493)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5494) while (!list_empty(&reaplist)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5495) nbl = list_first_entry(&reaplist,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5496) struct nfsd4_blocked_lock, nbl_lru);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5497) list_del_init(&nbl->nbl_lru);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5498) free_blocked_lock(nbl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5499) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5500) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5501) new_timeo = max_t(time64_t, new_timeo, NFSD_LAUNDROMAT_MINTIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5502) return new_timeo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5503) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5504)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5505) static struct workqueue_struct *laundry_wq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5506) static void laundromat_main(struct work_struct *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5507)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5508) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5509) laundromat_main(struct work_struct *laundry)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5510) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5511) time64_t t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5512) struct delayed_work *dwork = to_delayed_work(laundry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5513) struct nfsd_net *nn = container_of(dwork, struct nfsd_net,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5514) laundromat_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5515)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5516) t = nfs4_laundromat(nn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5517) queue_delayed_work(laundry_wq, &nn->laundromat_work, t*HZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5518) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5519)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5520) static inline __be32 nfs4_check_fh(struct svc_fh *fhp, struct nfs4_stid *stp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5521) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5522) if (!fh_match(&fhp->fh_handle, &stp->sc_file->fi_fhandle))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5523) return nfserr_bad_stateid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5524) return nfs_ok;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5525) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5526)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5527) static inline int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5528) access_permit_read(struct nfs4_ol_stateid *stp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5529) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5530) return test_access(NFS4_SHARE_ACCESS_READ, stp) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5531) test_access(NFS4_SHARE_ACCESS_BOTH, stp) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5532) test_access(NFS4_SHARE_ACCESS_WRITE, stp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5533) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5534)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5535) static inline int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5536) access_permit_write(struct nfs4_ol_stateid *stp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5537) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5538) return test_access(NFS4_SHARE_ACCESS_WRITE, stp) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5539) test_access(NFS4_SHARE_ACCESS_BOTH, stp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5540) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5541)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5542) static
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5543) __be32 nfs4_check_openmode(struct nfs4_ol_stateid *stp, int flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5544) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5545) __be32 status = nfserr_openmode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5546)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5547) /* For lock stateid's, we test the parent open, not the lock: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5548) if (stp->st_openstp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5549) stp = stp->st_openstp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5550) if ((flags & WR_STATE) && !access_permit_write(stp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5551) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5552) if ((flags & RD_STATE) && !access_permit_read(stp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5553) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5554) status = nfs_ok;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5555) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5556) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5557) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5558)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5559) static inline __be32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5560) check_special_stateids(struct net *net, svc_fh *current_fh, stateid_t *stateid, int flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5561) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5562) if (ONE_STATEID(stateid) && (flags & RD_STATE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5563) return nfs_ok;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5564) else if (opens_in_grace(net)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5565) /* Answer in remaining cases depends on existence of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5566) * conflicting state; so we must wait out the grace period. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5567) return nfserr_grace;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5568) } else if (flags & WR_STATE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5569) return nfs4_share_conflict(current_fh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5570) NFS4_SHARE_DENY_WRITE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5571) else /* (flags & RD_STATE) && ZERO_STATEID(stateid) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5572) return nfs4_share_conflict(current_fh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5573) NFS4_SHARE_DENY_READ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5574) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5575)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5576) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5577) * Allow READ/WRITE during grace period on recovered state only for files
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5578) * that are not able to provide mandatory locking.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5579) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5580) static inline int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5581) grace_disallows_io(struct net *net, struct inode *inode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5582) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5583) return opens_in_grace(net) && mandatory_lock(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5584) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5585)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5586) static __be32 check_stateid_generation(stateid_t *in, stateid_t *ref, bool has_session)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5587) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5588) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5589) * When sessions are used the stateid generation number is ignored
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5590) * when it is zero.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5591) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5592) if (has_session && in->si_generation == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5593) return nfs_ok;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5594)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5595) if (in->si_generation == ref->si_generation)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5596) return nfs_ok;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5597)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5598) /* If the client sends us a stateid from the future, it's buggy: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5599) if (nfsd4_stateid_generation_after(in, ref))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5600) return nfserr_bad_stateid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5601) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5602) * However, we could see a stateid from the past, even from a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5603) * non-buggy client. For example, if the client sends a lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5604) * while some IO is outstanding, the lock may bump si_generation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5605) * while the IO is still in flight. The client could avoid that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5606) * situation by waiting for responses on all the IO requests,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5607) * but better performance may result in retrying IO that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5608) * receives an old_stateid error if requests are rarely
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5609) * reordered in flight:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5610) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5611) return nfserr_old_stateid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5612) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5613)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5614) static __be32 nfsd4_stid_check_stateid_generation(stateid_t *in, struct nfs4_stid *s, bool has_session)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5615) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5616) __be32 ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5617)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5618) spin_lock(&s->sc_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5619) ret = nfsd4_verify_open_stid(s);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5620) if (ret == nfs_ok)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5621) ret = check_stateid_generation(in, &s->sc_stateid, has_session);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5622) spin_unlock(&s->sc_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5623) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5624) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5625)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5626) static __be32 nfsd4_check_openowner_confirmed(struct nfs4_ol_stateid *ols)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5627) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5628) if (ols->st_stateowner->so_is_open_owner &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5629) !(openowner(ols->st_stateowner)->oo_flags & NFS4_OO_CONFIRMED))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5630) return nfserr_bad_stateid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5631) return nfs_ok;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5632) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5633)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5634) static __be32 nfsd4_validate_stateid(struct nfs4_client *cl, stateid_t *stateid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5635) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5636) struct nfs4_stid *s;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5637) __be32 status = nfserr_bad_stateid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5638)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5639) if (ZERO_STATEID(stateid) || ONE_STATEID(stateid) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5640) CLOSE_STATEID(stateid))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5641) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5642) if (!same_clid(&stateid->si_opaque.so_clid, &cl->cl_clientid))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5643) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5644) spin_lock(&cl->cl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5645) s = find_stateid_locked(cl, stateid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5646) if (!s)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5647) goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5648) status = nfsd4_stid_check_stateid_generation(stateid, s, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5649) if (status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5650) goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5651) switch (s->sc_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5652) case NFS4_DELEG_STID:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5653) status = nfs_ok;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5654) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5655) case NFS4_REVOKED_DELEG_STID:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5656) status = nfserr_deleg_revoked;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5657) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5658) case NFS4_OPEN_STID:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5659) case NFS4_LOCK_STID:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5660) status = nfsd4_check_openowner_confirmed(openlockstateid(s));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5661) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5662) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5663) printk("unknown stateid type %x\n", s->sc_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5664) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5665) case NFS4_CLOSED_STID:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5666) case NFS4_CLOSED_DELEG_STID:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5667) status = nfserr_bad_stateid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5668) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5669) out_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5670) spin_unlock(&cl->cl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5671) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5672) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5673)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5674) __be32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5675) nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5676) stateid_t *stateid, unsigned char typemask,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5677) struct nfs4_stid **s, struct nfsd_net *nn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5678) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5679) __be32 status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5680) bool return_revoked = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5681)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5682) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5683) * only return revoked delegations if explicitly asked.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5684) * otherwise we report revoked or bad_stateid status.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5685) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5686) if (typemask & NFS4_REVOKED_DELEG_STID)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5687) return_revoked = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5688) else if (typemask & NFS4_DELEG_STID)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5689) typemask |= NFS4_REVOKED_DELEG_STID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5690)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5691) if (ZERO_STATEID(stateid) || ONE_STATEID(stateid) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5692) CLOSE_STATEID(stateid))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5693) return nfserr_bad_stateid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5694) status = lookup_clientid(&stateid->si_opaque.so_clid, cstate, nn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5695) false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5696) if (status == nfserr_stale_clientid) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5697) if (cstate->session)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5698) return nfserr_bad_stateid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5699) return nfserr_stale_stateid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5700) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5701) if (status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5702) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5703) *s = find_stateid_by_type(cstate->clp, stateid, typemask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5704) if (!*s)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5705) return nfserr_bad_stateid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5706) if (((*s)->sc_type == NFS4_REVOKED_DELEG_STID) && !return_revoked) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5707) nfs4_put_stid(*s);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5708) if (cstate->minorversion)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5709) return nfserr_deleg_revoked;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5710) return nfserr_bad_stateid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5711) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5712) return nfs_ok;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5713) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5714)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5715) static struct nfsd_file *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5716) nfs4_find_file(struct nfs4_stid *s, int flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5717) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5718) if (!s)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5719) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5720)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5721) switch (s->sc_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5722) case NFS4_DELEG_STID:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5723) if (WARN_ON_ONCE(!s->sc_file->fi_deleg_file))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5724) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5725) return nfsd_file_get(s->sc_file->fi_deleg_file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5726) case NFS4_OPEN_STID:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5727) case NFS4_LOCK_STID:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5728) if (flags & RD_STATE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5729) return find_readable_file(s->sc_file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5730) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5731) return find_writeable_file(s->sc_file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5732) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5733)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5734) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5735) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5736)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5737) static __be32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5738) nfs4_check_olstateid(struct nfs4_ol_stateid *ols, int flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5739) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5740) __be32 status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5741)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5742) status = nfsd4_check_openowner_confirmed(ols);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5743) if (status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5744) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5745) return nfs4_check_openmode(ols, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5746) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5747)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5748) static __be32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5749) nfs4_check_file(struct svc_rqst *rqstp, struct svc_fh *fhp, struct nfs4_stid *s,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5750) struct nfsd_file **nfp, int flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5751) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5752) int acc = (flags & RD_STATE) ? NFSD_MAY_READ : NFSD_MAY_WRITE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5753) struct nfsd_file *nf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5754) __be32 status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5755)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5756) nf = nfs4_find_file(s, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5757) if (nf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5758) status = nfsd_permission(rqstp, fhp->fh_export, fhp->fh_dentry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5759) acc | NFSD_MAY_OWNER_OVERRIDE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5760) if (status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5761) nfsd_file_put(nf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5762) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5763) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5764) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5765) status = nfsd_file_acquire(rqstp, fhp, acc, &nf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5766) if (status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5767) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5768) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5769) *nfp = nf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5770) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5771) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5772) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5773) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5774) _free_cpntf_state_locked(struct nfsd_net *nn, struct nfs4_cpntf_state *cps)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5775) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5776) WARN_ON_ONCE(cps->cp_stateid.sc_type != NFS4_COPYNOTIFY_STID);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5777) if (!refcount_dec_and_test(&cps->cp_stateid.sc_count))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5778) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5779) list_del(&cps->cp_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5780) idr_remove(&nn->s2s_cp_stateids,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5781) cps->cp_stateid.stid.si_opaque.so_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5782) kfree(cps);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5783) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5784) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5785) * A READ from an inter server to server COPY will have a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5786) * copy stateid. Look up the copy notify stateid from the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5787) * idr structure and take a reference on it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5788) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5789) __be32 manage_cpntf_state(struct nfsd_net *nn, stateid_t *st,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5790) struct nfs4_client *clp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5791) struct nfs4_cpntf_state **cps)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5792) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5793) copy_stateid_t *cps_t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5794) struct nfs4_cpntf_state *state = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5795)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5796) if (st->si_opaque.so_clid.cl_id != nn->s2s_cp_cl_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5797) return nfserr_bad_stateid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5798) spin_lock(&nn->s2s_cp_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5799) cps_t = idr_find(&nn->s2s_cp_stateids, st->si_opaque.so_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5800) if (cps_t) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5801) state = container_of(cps_t, struct nfs4_cpntf_state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5802) cp_stateid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5803) if (state->cp_stateid.sc_type != NFS4_COPYNOTIFY_STID) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5804) state = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5805) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5806) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5807) if (!clp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5808) refcount_inc(&state->cp_stateid.sc_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5809) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5810) _free_cpntf_state_locked(nn, state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5811) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5812) unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5813) spin_unlock(&nn->s2s_cp_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5814) if (!state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5815) return nfserr_bad_stateid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5816) if (!clp && state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5817) *cps = state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5818) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5819) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5820)
/*
 * Resolve a copy-notify stateid @st to the parent open/lock/delegation
 * stateid it was derived from, returning the latter (referenced) in
 * *@stid.  Called when a READ arrives carrying a copy stateid from an
 * inter-server COPY.
 */
static __be32 find_cpntf_state(struct nfsd_net *nn, stateid_t *st,
			       struct nfs4_stid **stid)
{
	__be32 status;
	struct nfs4_cpntf_state *cps = NULL;
	struct nfsd4_compound_state cstate;

	/* Takes a reference on the copy-notify state on success */
	status = manage_cpntf_state(nn, st, NULL, &cps);
	if (status)
		return status;

	/* Timestamp of last use — presumably consulted when deciding
	 * whether to expire the copy-notify state; confirm at the
	 * reader of cpntf_time. */
	cps->cpntf_time = ktime_get_boottime_seconds();
	memset(&cstate, 0, sizeof(cstate));
	status = lookup_clientid(&cps->cp_p_clid, &cstate, nn, true);
	if (status)
		goto out;
	status = nfsd4_lookup_stateid(&cstate, &cps->cp_p_stateid,
				NFS4_DELEG_STID|NFS4_OPEN_STID|NFS4_LOCK_STID,
				stid, nn);
	put_client_renew(cstate.clp);	/* drop client ref from lookup_clientid */
out:
	nfs4_put_cpntf_state(nn, cps);	/* drop ref from manage_cpntf_state */
	return status;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5845)
/*
 * Drop a reference on a copy-notify state, taking the per-net
 * s2s_cp_lock that _free_cpntf_state_locked() requires.
 */
void nfs4_put_cpntf_state(struct nfsd_net *nn, struct nfs4_cpntf_state *cps)
{
	spin_lock(&nn->s2s_cp_lock);
	_free_cpntf_state_locked(nn, cps);
	spin_unlock(&nn->s2s_cp_lock);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5852)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5853) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5854) * Checks for stateid operations
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5855) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5856) __be32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5857) nfs4_preprocess_stateid_op(struct svc_rqst *rqstp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5858) struct nfsd4_compound_state *cstate, struct svc_fh *fhp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5859) stateid_t *stateid, int flags, struct nfsd_file **nfp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5860) struct nfs4_stid **cstid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5861) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5862) struct inode *ino = d_inode(fhp->fh_dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5863) struct net *net = SVC_NET(rqstp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5864) struct nfsd_net *nn = net_generic(net, nfsd_net_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5865) struct nfs4_stid *s = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5866) __be32 status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5867)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5868) if (nfp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5869) *nfp = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5870)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5871) if (grace_disallows_io(net, ino))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5872) return nfserr_grace;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5873)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5874) if (ZERO_STATEID(stateid) || ONE_STATEID(stateid)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5875) status = check_special_stateids(net, fhp, stateid, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5876) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5877) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5878)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5879) status = nfsd4_lookup_stateid(cstate, stateid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5880) NFS4_DELEG_STID|NFS4_OPEN_STID|NFS4_LOCK_STID,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5881) &s, nn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5882) if (status == nfserr_bad_stateid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5883) status = find_cpntf_state(nn, stateid, &s);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5884) if (status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5885) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5886) status = nfsd4_stid_check_stateid_generation(stateid, s,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5887) nfsd4_has_session(cstate));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5888) if (status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5889) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5890)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5891) switch (s->sc_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5892) case NFS4_DELEG_STID:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5893) status = nfs4_check_delegmode(delegstateid(s), flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5894) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5895) case NFS4_OPEN_STID:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5896) case NFS4_LOCK_STID:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5897) status = nfs4_check_olstateid(openlockstateid(s), flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5898) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5899) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5900) status = nfserr_bad_stateid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5901) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5902) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5903) if (status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5904) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5905) status = nfs4_check_fh(fhp, s);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5906)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5907) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5908) if (status == nfs_ok && nfp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5909) status = nfs4_check_file(rqstp, fhp, s, nfp, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5910) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5911) if (s) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5912) if (!status && cstid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5913) *cstid = s;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5914) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5915) nfs4_put_stid(s);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5916) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5917) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5918) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5919)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5920) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5921) * Test if the stateid is valid
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5922) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5923) __be32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5924) nfsd4_test_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5925) union nfsd4_op_u *u)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5926) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5927) struct nfsd4_test_stateid *test_stateid = &u->test_stateid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5928) struct nfsd4_test_stateid_id *stateid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5929) struct nfs4_client *cl = cstate->session->se_client;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5930)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5931) list_for_each_entry(stateid, &test_stateid->ts_stateid_list, ts_id_list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5932) stateid->ts_id_status =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5933) nfsd4_validate_stateid(cl, &stateid->ts_id_stateid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5934)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5935) return nfs_ok;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5936) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5937)
/*
 * Free a lock stateid on behalf of FREE_STATEID, provided the
 * lockowner holds no remaining locks on the file.  Consumes the
 * caller's reference on @s on every path.
 */
static __be32
nfsd4_free_lock_stateid(stateid_t *stateid, struct nfs4_stid *s)
{
	struct nfs4_ol_stateid *stp = openlockstateid(s);
	__be32 ret;

	/* NOTE(review): assumed st_mutex is NOT held on failure here —
	 * the failure path skips mutex_unlock(); confirm in
	 * nfsd4_lock_ol_stateid(). */
	ret = nfsd4_lock_ol_stateid(stp);
	if (ret)
		goto out_put_stid;

	ret = check_stateid_generation(stateid, &s->sc_stateid, 1);
	if (ret)
		goto out;

	/* Cannot free a lock stateid while locks are outstanding */
	ret = nfserr_locks_held;
	if (check_for_locks(stp->st_stid.sc_file,
			    lockowner(stp->st_stateowner)))
		goto out;

	release_lock_stateid(stp);
	ret = nfs_ok;

out:
	mutex_unlock(&stp->st_mutex);
out_put_stid:
	nfs4_put_stid(s);
	return ret;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5966)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5967) __be32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5968) nfsd4_free_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5969) union nfsd4_op_u *u)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5970) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5971) struct nfsd4_free_stateid *free_stateid = &u->free_stateid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5972) stateid_t *stateid = &free_stateid->fr_stateid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5973) struct nfs4_stid *s;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5974) struct nfs4_delegation *dp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5975) struct nfs4_client *cl = cstate->session->se_client;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5976) __be32 ret = nfserr_bad_stateid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5977)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5978) spin_lock(&cl->cl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5979) s = find_stateid_locked(cl, stateid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5980) if (!s)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5981) goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5982) spin_lock(&s->sc_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5983) switch (s->sc_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5984) case NFS4_DELEG_STID:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5985) ret = nfserr_locks_held;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5986) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5987) case NFS4_OPEN_STID:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5988) ret = check_stateid_generation(stateid, &s->sc_stateid, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5989) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5990) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5991) ret = nfserr_locks_held;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5992) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5993) case NFS4_LOCK_STID:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5994) spin_unlock(&s->sc_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5995) refcount_inc(&s->sc_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5996) spin_unlock(&cl->cl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5997) ret = nfsd4_free_lock_stateid(stateid, s);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5998) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5999) case NFS4_REVOKED_DELEG_STID:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6000) spin_unlock(&s->sc_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6001) dp = delegstateid(s);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6002) list_del_init(&dp->dl_recall_lru);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6003) spin_unlock(&cl->cl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6004) nfs4_put_stid(s);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6005) ret = nfs_ok;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6006) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6007) /* Default falls through and returns nfserr_bad_stateid */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6008) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6009) spin_unlock(&s->sc_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6010) out_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6011) spin_unlock(&cl->cl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6012) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6013) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6014) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6015)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6016) static inline int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6017) setlkflg (int type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6018) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6019) return (type == NFS4_READW_LT || type == NFS4_READ_LT) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6020) RD_STATE : WR_STATE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6021) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6022)
/*
 * Common checks for a seqid-mutating operation on an open/lock
 * stateid: verify the owner's seqid, lock the stateid mutex, then
 * check the stateid generation and that it matches the current
 * filehandle.  On nfs_ok return, stp->st_mutex is held and the
 * caller must release it; on any error it is not held (assuming
 * nfsd4_lock_ol_stateid() does not hold it on failure — confirm
 * at its definition).
 */
static __be32 nfs4_seqid_op_checks(struct nfsd4_compound_state *cstate, stateid_t *stateid, u32 seqid, struct nfs4_ol_stateid *stp)
{
	struct svc_fh *current_fh = &cstate->current_fh;
	struct nfs4_stateowner *sop = stp->st_stateowner;
	__be32 status;

	status = nfsd4_check_seqid(cstate, sop, seqid);
	if (status)
		return status;
	status = nfsd4_lock_ol_stateid(stp);
	if (status != nfs_ok)
		return status;
	status = check_stateid_generation(stateid, &stp->st_stid.sc_stateid, nfsd4_has_session(cstate));
	if (status == nfs_ok)
		status = nfs4_check_fh(current_fh, &stp->st_stid);
	if (status != nfs_ok)
		mutex_unlock(&stp->st_mutex);
	return status;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6042)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6043) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6044) * Checks for sequence id mutating operations.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6045) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6046) static __be32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6047) nfs4_preprocess_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6048) stateid_t *stateid, char typemask,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6049) struct nfs4_ol_stateid **stpp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6050) struct nfsd_net *nn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6051) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6052) __be32 status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6053) struct nfs4_stid *s;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6054) struct nfs4_ol_stateid *stp = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6055)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6056) trace_nfsd_preprocess(seqid, stateid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6057)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6058) *stpp = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6059) status = nfsd4_lookup_stateid(cstate, stateid, typemask, &s, nn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6060) if (status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6061) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6062) stp = openlockstateid(s);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6063) nfsd4_cstate_assign_replay(cstate, stp->st_stateowner);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6064)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6065) status = nfs4_seqid_op_checks(cstate, stateid, seqid, stp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6066) if (!status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6067) *stpp = stp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6068) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6069) nfs4_put_stid(&stp->st_stid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6070) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6071) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6072)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6073) static __be32 nfs4_preprocess_confirmed_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6074) stateid_t *stateid, struct nfs4_ol_stateid **stpp, struct nfsd_net *nn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6075) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6076) __be32 status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6077) struct nfs4_openowner *oo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6078) struct nfs4_ol_stateid *stp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6079)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6080) status = nfs4_preprocess_seqid_op(cstate, seqid, stateid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6081) NFS4_OPEN_STID, &stp, nn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6082) if (status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6083) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6084) oo = openowner(stp->st_stateowner);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6085) if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6086) mutex_unlock(&stp->st_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6087) nfs4_put_stid(&stp->st_stid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6088) return nfserr_bad_stateid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6089) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6090) *stpp = stp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6091) return nfs_ok;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6092) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6093)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6094) __be32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6095) nfsd4_open_confirm(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6096) union nfsd4_op_u *u)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6097) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6098) struct nfsd4_open_confirm *oc = &u->open_confirm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6099) __be32 status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6100) struct nfs4_openowner *oo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6101) struct nfs4_ol_stateid *stp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6102) struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6103)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6104) dprintk("NFSD: nfsd4_open_confirm on file %pd\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6105) cstate->current_fh.fh_dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6106)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6107) status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6108) if (status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6109) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6110)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6111) status = nfs4_preprocess_seqid_op(cstate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6112) oc->oc_seqid, &oc->oc_req_stateid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6113) NFS4_OPEN_STID, &stp, nn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6114) if (status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6115) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6116) oo = openowner(stp->st_stateowner);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6117) status = nfserr_bad_stateid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6118) if (oo->oo_flags & NFS4_OO_CONFIRMED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6119) mutex_unlock(&stp->st_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6120) goto put_stateid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6121) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6122) oo->oo_flags |= NFS4_OO_CONFIRMED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6123) nfs4_inc_and_copy_stateid(&oc->oc_resp_stateid, &stp->st_stid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6124) mutex_unlock(&stp->st_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6125) trace_nfsd_open_confirm(oc->oc_seqid, &stp->st_stid.sc_stateid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6126) nfsd4_client_record_create(oo->oo_owner.so_client);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6127) status = nfs_ok;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6128) put_stateid:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6129) nfs4_put_stid(&stp->st_stid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6130) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6131) nfsd4_bump_seqid(cstate, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6132) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6133) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6134)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6135) static inline void nfs4_stateid_downgrade_bit(struct nfs4_ol_stateid *stp, u32 access)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6136) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6137) if (!test_access(access, stp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6138) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6139) nfs4_file_put_access(stp->st_stid.sc_file, access);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6140) clear_access(access, stp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6141) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6142)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6143) static inline void nfs4_stateid_downgrade(struct nfs4_ol_stateid *stp, u32 to_access)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6144) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6145) switch (to_access) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6146) case NFS4_SHARE_ACCESS_READ:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6147) nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_WRITE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6148) nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_BOTH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6149) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6150) case NFS4_SHARE_ACCESS_WRITE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6151) nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_READ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6152) nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_BOTH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6153) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6154) case NFS4_SHARE_ACCESS_BOTH:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6155) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6156) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6157) WARN_ON_ONCE(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6158) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6159) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6160)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6161) __be32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6162) nfsd4_open_downgrade(struct svc_rqst *rqstp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6163) struct nfsd4_compound_state *cstate, union nfsd4_op_u *u)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6164) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6165) struct nfsd4_open_downgrade *od = &u->open_downgrade;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6166) __be32 status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6167) struct nfs4_ol_stateid *stp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6168) struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6169)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6170) dprintk("NFSD: nfsd4_open_downgrade on file %pd\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6171) cstate->current_fh.fh_dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6172)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6173) /* We don't yet support WANT bits: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6174) if (od->od_deleg_want)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6175) dprintk("NFSD: %s: od_deleg_want=0x%x ignored\n", __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6176) od->od_deleg_want);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6177)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6178) status = nfs4_preprocess_confirmed_seqid_op(cstate, od->od_seqid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6179) &od->od_stateid, &stp, nn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6180) if (status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6181) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6182) status = nfserr_inval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6183) if (!test_access(od->od_share_access, stp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6184) dprintk("NFSD: access not a subset of current bitmap: 0x%hhx, input access=%08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6185) stp->st_access_bmap, od->od_share_access);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6186) goto put_stateid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6187) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6188) if (!test_deny(od->od_share_deny, stp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6189) dprintk("NFSD: deny not a subset of current bitmap: 0x%hhx, input deny=%08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6190) stp->st_deny_bmap, od->od_share_deny);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6191) goto put_stateid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6192) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6193) nfs4_stateid_downgrade(stp, od->od_share_access);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6194) reset_union_bmap_deny(od->od_share_deny, stp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6195) nfs4_inc_and_copy_stateid(&od->od_stateid, &stp->st_stid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6196) status = nfs_ok;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6197) put_stateid:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6198) mutex_unlock(&stp->st_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6199) nfs4_put_stid(&stp->st_stid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6200) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6201) nfsd4_bump_seqid(cstate, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6202) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6203) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6204)
/*
 * Unhash a CLOSEd open stateid and release its resources.  For
 * minorversion != 0 clients the stateid is put immediately; for
 * v4.0 an unhashed stateid is instead handed to move_to_close_lru()
 * — presumably to keep it (and its owner) around for CLOSE replay
 * handling; confirm at move_to_close_lru()'s definition.
 */
static void nfsd4_close_open_stateid(struct nfs4_ol_stateid *s)
{
	struct nfs4_client *clp = s->st_stid.sc_client;
	bool unhashed;
	LIST_HEAD(reaplist);

	spin_lock(&clp->cl_lock);
	unhashed = unhash_open_stateid(s, &reaplist);

	if (clp->cl_minorversion) {
		if (unhashed)
			put_ol_stateid_locked(s, &reaplist);
		spin_unlock(&clp->cl_lock);
		/* Reaplist entries are freed outside cl_lock */
		free_ol_stateid_reaplist(&reaplist);
	} else {
		spin_unlock(&clp->cl_lock);
		free_ol_stateid_reaplist(&reaplist);
		if (unhashed)
			move_to_close_lru(s, clp->net);
	}
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6226)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6227) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6228) * nfs4_unlock_state() called after encode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6229) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6230) __be32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6231) nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6232) union nfsd4_op_u *u)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6233) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6234) struct nfsd4_close *close = &u->close;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6235) __be32 status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6236) struct nfs4_ol_stateid *stp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6237) struct net *net = SVC_NET(rqstp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6238) struct nfsd_net *nn = net_generic(net, nfsd_net_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6239)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6240) dprintk("NFSD: nfsd4_close on file %pd\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6241) cstate->current_fh.fh_dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6242)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6243) status = nfs4_preprocess_seqid_op(cstate, close->cl_seqid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6244) &close->cl_stateid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6245) NFS4_OPEN_STID|NFS4_CLOSED_STID,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6246) &stp, nn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6247) nfsd4_bump_seqid(cstate, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6248) if (status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6249) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6250)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6251) stp->st_stid.sc_type = NFS4_CLOSED_STID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6252)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6253) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6254) * Technically we don't _really_ have to increment or copy it, since
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6255) * it should just be gone after this operation and we clobber the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6256) * copied value below, but we continue to do so here just to ensure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6257) * that racing ops see that there was a state change.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6258) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6259) nfs4_inc_and_copy_stateid(&close->cl_stateid, &stp->st_stid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6260)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6261) nfsd4_close_open_stateid(stp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6262) mutex_unlock(&stp->st_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6263)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6264) /* v4.1+ suggests that we send a special stateid in here, since the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6265) * clients should just ignore this anyway. Since this is not useful
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6266) * for v4.0 clients either, we set it to the special close_stateid
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6267) * universally.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6268) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6269) * See RFC5661 section 18.2.4, and RFC7530 section 16.2.5
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6270) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6271) memcpy(&close->cl_stateid, &close_stateid, sizeof(close->cl_stateid));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6272)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6273) /* put reference from nfs4_preprocess_seqid_op */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6274) nfs4_put_stid(&stp->st_stid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6275) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6276) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6277) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6278)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6279) __be32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6280) nfsd4_delegreturn(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6281) union nfsd4_op_u *u)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6282) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6283) struct nfsd4_delegreturn *dr = &u->delegreturn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6284) struct nfs4_delegation *dp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6285) stateid_t *stateid = &dr->dr_stateid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6286) struct nfs4_stid *s;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6287) __be32 status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6288) struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6289)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6290) if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6291) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6292)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6293) status = nfsd4_lookup_stateid(cstate, stateid, NFS4_DELEG_STID, &s, nn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6294) if (status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6295) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6296) dp = delegstateid(s);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6297) status = nfsd4_stid_check_stateid_generation(stateid, &dp->dl_stid, nfsd4_has_session(cstate));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6298) if (status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6299) goto put_stateid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6300)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6301) destroy_delegation(dp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6302) put_stateid:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6303) nfs4_put_stid(&dp->dl_stid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6304) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6305) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6306) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6307)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6308) static inline u64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6309) end_offset(u64 start, u64 len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6310) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6311) u64 end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6312)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6313) end = start + len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6314) return end >= start ? end: NFS4_MAX_UINT64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6315) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6316)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6317) /* last octet in a range */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6318) static inline u64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6319) last_byte_offset(u64 start, u64 len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6320) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6321) u64 end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6322)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6323) WARN_ON_ONCE(!len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6324) end = start + len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6325) return end > start ? end - 1: NFS4_MAX_UINT64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6326) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6327)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6328) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6329) * TODO: Linux file offsets are _signed_ 64-bit quantities, which means that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6330) * we can't properly handle lock requests that go beyond the (2^63 - 1)-th
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6331) * byte, because of sign extension problems. Since NFSv4 calls for 64-bit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6332) * locking, this prevents us from being completely protocol-compliant. The
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6333) * real solution to this problem is to start using unsigned file offsets in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6334) * the VFS, but this is a very deep change!
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6335) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6336) static inline void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6337) nfs4_transform_lock_offset(struct file_lock *lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6338) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6339) if (lock->fl_start < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6340) lock->fl_start = OFFSET_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6341) if (lock->fl_end < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6342) lock->fl_end = OFFSET_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6343) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6344)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6345) static fl_owner_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6346) nfsd4_fl_get_owner(fl_owner_t owner)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6347) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6348) struct nfs4_lockowner *lo = (struct nfs4_lockowner *)owner;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6349)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6350) nfs4_get_stateowner(&lo->lo_owner);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6351) return owner;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6352) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6353)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6354) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6355) nfsd4_fl_put_owner(fl_owner_t owner)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6356) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6357) struct nfs4_lockowner *lo = (struct nfs4_lockowner *)owner;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6358)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6359) if (lo)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6360) nfs4_put_stateowner(&lo->lo_owner);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6361) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6362)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6363) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6364) nfsd4_lm_notify(struct file_lock *fl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6365) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6366) struct nfs4_lockowner *lo = (struct nfs4_lockowner *)fl->fl_owner;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6367) struct net *net = lo->lo_owner.so_client->net;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6368) struct nfsd_net *nn = net_generic(net, nfsd_net_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6369) struct nfsd4_blocked_lock *nbl = container_of(fl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6370) struct nfsd4_blocked_lock, nbl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6371) bool queue = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6372)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6373) /* An empty list means that something else is going to be using it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6374) spin_lock(&nn->blocked_locks_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6375) if (!list_empty(&nbl->nbl_list)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6376) list_del_init(&nbl->nbl_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6377) list_del_init(&nbl->nbl_lru);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6378) queue = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6379) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6380) spin_unlock(&nn->blocked_locks_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6381)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6382) if (queue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6383) nfsd4_run_cb(&nbl->nbl_cb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6384) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6385)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6386) static const struct lock_manager_operations nfsd_posix_mng_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6387) .lm_notify = nfsd4_lm_notify,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6388) .lm_get_owner = nfsd4_fl_get_owner,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6389) .lm_put_owner = nfsd4_fl_put_owner,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6390) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6391)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6392) static inline void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6393) nfs4_set_lock_denied(struct file_lock *fl, struct nfsd4_lock_denied *deny)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6394) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6395) struct nfs4_lockowner *lo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6396)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6397) if (fl->fl_lmops == &nfsd_posix_mng_ops) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6398) lo = (struct nfs4_lockowner *) fl->fl_owner;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6399) xdr_netobj_dup(&deny->ld_owner, &lo->lo_owner.so_owner,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6400) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6401) if (!deny->ld_owner.data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6402) /* We just don't care that much */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6403) goto nevermind;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6404) deny->ld_clientid = lo->lo_owner.so_client->cl_clientid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6405) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6406) nevermind:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6407) deny->ld_owner.len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6408) deny->ld_owner.data = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6409) deny->ld_clientid.cl_boot = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6410) deny->ld_clientid.cl_id = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6411) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6412) deny->ld_start = fl->fl_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6413) deny->ld_length = NFS4_MAX_UINT64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6414) if (fl->fl_end != NFS4_MAX_UINT64)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6415) deny->ld_length = fl->fl_end - fl->fl_start + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6416) deny->ld_type = NFS4_READ_LT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6417) if (fl->fl_type != F_RDLCK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6418) deny->ld_type = NFS4_WRITE_LT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6419) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6420)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6421) static struct nfs4_lockowner *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6422) find_lockowner_str_locked(struct nfs4_client *clp, struct xdr_netobj *owner)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6423) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6424) unsigned int strhashval = ownerstr_hashval(owner);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6425) struct nfs4_stateowner *so;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6426)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6427) lockdep_assert_held(&clp->cl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6428)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6429) list_for_each_entry(so, &clp->cl_ownerstr_hashtbl[strhashval],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6430) so_strhash) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6431) if (so->so_is_open_owner)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6432) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6433) if (same_owner_str(so, owner))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6434) return lockowner(nfs4_get_stateowner(so));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6435) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6436) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6437) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6438)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6439) static struct nfs4_lockowner *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6440) find_lockowner_str(struct nfs4_client *clp, struct xdr_netobj *owner)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6441) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6442) struct nfs4_lockowner *lo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6443)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6444) spin_lock(&clp->cl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6445) lo = find_lockowner_str_locked(clp, owner);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6446) spin_unlock(&clp->cl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6447) return lo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6448) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6449)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6450) static void nfs4_unhash_lockowner(struct nfs4_stateowner *sop)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6451) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6452) unhash_lockowner_locked(lockowner(sop));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6453) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6454)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6455) static void nfs4_free_lockowner(struct nfs4_stateowner *sop)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6456) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6457) struct nfs4_lockowner *lo = lockowner(sop);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6458)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6459) kmem_cache_free(lockowner_slab, lo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6460) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6461)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6462) static const struct nfs4_stateowner_operations lockowner_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6463) .so_unhash = nfs4_unhash_lockowner,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6464) .so_free = nfs4_free_lockowner,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6465) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6466)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6467) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6468) * Alloc a lock owner structure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6469) * Called in nfsd4_lock - therefore, OPEN and OPEN_CONFIRM (if needed) has
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6470) * occurred.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6471) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6472) * strhashval = ownerstr_hashval
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6473) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6474) static struct nfs4_lockowner *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6475) alloc_init_lock_stateowner(unsigned int strhashval, struct nfs4_client *clp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6476) struct nfs4_ol_stateid *open_stp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6477) struct nfsd4_lock *lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6478) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6479) struct nfs4_lockowner *lo, *ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6480)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6481) lo = alloc_stateowner(lockowner_slab, &lock->lk_new_owner, clp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6482) if (!lo)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6483) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6484) INIT_LIST_HEAD(&lo->lo_blocked);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6485) INIT_LIST_HEAD(&lo->lo_owner.so_stateids);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6486) lo->lo_owner.so_is_open_owner = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6487) lo->lo_owner.so_seqid = lock->lk_new_lock_seqid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6488) lo->lo_owner.so_ops = &lockowner_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6489) spin_lock(&clp->cl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6490) ret = find_lockowner_str_locked(clp, &lock->lk_new_owner);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6491) if (ret == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6492) list_add(&lo->lo_owner.so_strhash,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6493) &clp->cl_ownerstr_hashtbl[strhashval]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6494) ret = lo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6495) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6496) nfs4_free_stateowner(&lo->lo_owner);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6497)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6498) spin_unlock(&clp->cl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6499) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6500) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6501)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6502) static struct nfs4_ol_stateid *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6503) find_lock_stateid(const struct nfs4_lockowner *lo,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6504) const struct nfs4_ol_stateid *ost)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6505) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6506) struct nfs4_ol_stateid *lst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6507)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6508) lockdep_assert_held(&ost->st_stid.sc_client->cl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6509)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6510) /* If ost is not hashed, ost->st_locks will not be valid */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6511) if (!nfs4_ol_stateid_unhashed(ost))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6512) list_for_each_entry(lst, &ost->st_locks, st_locks) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6513) if (lst->st_stateowner == &lo->lo_owner) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6514) refcount_inc(&lst->st_stid.sc_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6515) return lst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6516) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6517) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6518) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6519) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6520)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6521) static struct nfs4_ol_stateid *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6522) init_lock_stateid(struct nfs4_ol_stateid *stp, struct nfs4_lockowner *lo,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6523) struct nfs4_file *fp, struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6524) struct nfs4_ol_stateid *open_stp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6525) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6526) struct nfs4_client *clp = lo->lo_owner.so_client;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6527) struct nfs4_ol_stateid *retstp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6528)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6529) mutex_init(&stp->st_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6530) mutex_lock_nested(&stp->st_mutex, OPEN_STATEID_MUTEX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6531) retry:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6532) spin_lock(&clp->cl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6533) if (nfs4_ol_stateid_unhashed(open_stp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6534) goto out_close;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6535) retstp = find_lock_stateid(lo, open_stp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6536) if (retstp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6537) goto out_found;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6538) refcount_inc(&stp->st_stid.sc_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6539) stp->st_stid.sc_type = NFS4_LOCK_STID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6540) stp->st_stateowner = nfs4_get_stateowner(&lo->lo_owner);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6541) get_nfs4_file(fp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6542) stp->st_stid.sc_file = fp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6543) stp->st_access_bmap = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6544) stp->st_deny_bmap = open_stp->st_deny_bmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6545) stp->st_openstp = open_stp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6546) spin_lock(&fp->fi_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6547) list_add(&stp->st_locks, &open_stp->st_locks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6548) list_add(&stp->st_perstateowner, &lo->lo_owner.so_stateids);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6549) list_add(&stp->st_perfile, &fp->fi_stateids);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6550) spin_unlock(&fp->fi_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6551) spin_unlock(&clp->cl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6552) return stp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6553) out_found:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6554) spin_unlock(&clp->cl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6555) if (nfsd4_lock_ol_stateid(retstp) != nfs_ok) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6556) nfs4_put_stid(&retstp->st_stid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6557) goto retry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6558) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6559) /* To keep mutex tracking happy */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6560) mutex_unlock(&stp->st_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6561) return retstp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6562) out_close:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6563) spin_unlock(&clp->cl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6564) mutex_unlock(&stp->st_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6565) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6566) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6567)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6568) static struct nfs4_ol_stateid *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6569) find_or_create_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6570) struct inode *inode, struct nfs4_ol_stateid *ost,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6571) bool *new)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6572) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6573) struct nfs4_stid *ns = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6574) struct nfs4_ol_stateid *lst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6575) struct nfs4_openowner *oo = openowner(ost->st_stateowner);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6576) struct nfs4_client *clp = oo->oo_owner.so_client;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6577)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6578) *new = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6579) spin_lock(&clp->cl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6580) lst = find_lock_stateid(lo, ost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6581) spin_unlock(&clp->cl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6582) if (lst != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6583) if (nfsd4_lock_ol_stateid(lst) == nfs_ok)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6584) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6585) nfs4_put_stid(&lst->st_stid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6586) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6587) ns = nfs4_alloc_stid(clp, stateid_slab, nfs4_free_lock_stateid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6588) if (ns == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6589) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6590)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6591) lst = init_lock_stateid(openlockstateid(ns), lo, fi, inode, ost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6592) if (lst == openlockstateid(ns))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6593) *new = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6594) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6595) nfs4_put_stid(ns);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6596) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6597) return lst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6598) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6599)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6600) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6601) check_lock_length(u64 offset, u64 length)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6602) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6603) return ((length == 0) || ((length != NFS4_MAX_UINT64) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6604) (length > ~offset)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6605) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6606)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6607) static void get_lock_access(struct nfs4_ol_stateid *lock_stp, u32 access)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6608) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6609) struct nfs4_file *fp = lock_stp->st_stid.sc_file;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6610)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6611) lockdep_assert_held(&fp->fi_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6612)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6613) if (test_access(access, lock_stp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6614) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6615) __nfs4_file_get_access(fp, access);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6616) set_access(access, lock_stp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6617) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6618)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6619) static __be32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6620) lookup_or_create_lock_state(struct nfsd4_compound_state *cstate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6621) struct nfs4_ol_stateid *ost,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6622) struct nfsd4_lock *lock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6623) struct nfs4_ol_stateid **plst, bool *new)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6624) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6625) __be32 status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6626) struct nfs4_file *fi = ost->st_stid.sc_file;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6627) struct nfs4_openowner *oo = openowner(ost->st_stateowner);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6628) struct nfs4_client *cl = oo->oo_owner.so_client;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6629) struct inode *inode = d_inode(cstate->current_fh.fh_dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6630) struct nfs4_lockowner *lo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6631) struct nfs4_ol_stateid *lst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6632) unsigned int strhashval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6633)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6634) lo = find_lockowner_str(cl, &lock->lk_new_owner);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6635) if (!lo) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6636) strhashval = ownerstr_hashval(&lock->lk_new_owner);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6637) lo = alloc_init_lock_stateowner(strhashval, cl, ost, lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6638) if (lo == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6639) return nfserr_jukebox;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6640) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6641) /* with an existing lockowner, seqids must be the same */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6642) status = nfserr_bad_seqid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6643) if (!cstate->minorversion &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6644) lock->lk_new_lock_seqid != lo->lo_owner.so_seqid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6645) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6646) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6647)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6648) lst = find_or_create_lock_stateid(lo, fi, inode, ost, new);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6649) if (lst == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6650) status = nfserr_jukebox;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6651) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6652) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6653)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6654) status = nfs_ok;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6655) *plst = lst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6656) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6657) nfs4_put_stateowner(&lo->lo_owner);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6658) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6659) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6660)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6661) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6662) * LOCK operation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6663) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6664) __be32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6665) nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6666) union nfsd4_op_u *u)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6667) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6668) struct nfsd4_lock *lock = &u->lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6669) struct nfs4_openowner *open_sop = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6670) struct nfs4_lockowner *lock_sop = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6671) struct nfs4_ol_stateid *lock_stp = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6672) struct nfs4_ol_stateid *open_stp = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6673) struct nfs4_file *fp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6674) struct nfsd_file *nf = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6675) struct nfsd4_blocked_lock *nbl = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6676) struct file_lock *file_lock = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6677) struct file_lock *conflock = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6678) __be32 status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6679) int lkflg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6680) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6681) bool new = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6682) unsigned char fl_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6683) unsigned int fl_flags = FL_POSIX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6684) struct net *net = SVC_NET(rqstp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6685) struct nfsd_net *nn = net_generic(net, nfsd_net_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6686)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6687) dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6688) (long long) lock->lk_offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6689) (long long) lock->lk_length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6690)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6691) if (check_lock_length(lock->lk_offset, lock->lk_length))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6692) return nfserr_inval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6693)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6694) if ((status = fh_verify(rqstp, &cstate->current_fh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6695) S_IFREG, NFSD_MAY_LOCK))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6696) dprintk("NFSD: nfsd4_lock: permission denied!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6697) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6698) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6699)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6700) if (lock->lk_is_new) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6701) if (nfsd4_has_session(cstate))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6702) /* See rfc 5661 18.10.3: given clientid is ignored: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6703) memcpy(&lock->lk_new_clientid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6704) &cstate->session->se_client->cl_clientid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6705) sizeof(clientid_t));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6706)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6707) status = nfserr_stale_clientid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6708) if (STALE_CLIENTID(&lock->lk_new_clientid, nn))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6709) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6710)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6711) /* validate and update open stateid and open seqid */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6712) status = nfs4_preprocess_confirmed_seqid_op(cstate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6713) lock->lk_new_open_seqid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6714) &lock->lk_new_open_stateid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6715) &open_stp, nn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6716) if (status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6717) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6718) mutex_unlock(&open_stp->st_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6719) open_sop = openowner(open_stp->st_stateowner);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6720) status = nfserr_bad_stateid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6721) if (!same_clid(&open_sop->oo_owner.so_client->cl_clientid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6722) &lock->lk_new_clientid))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6723) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6724) status = lookup_or_create_lock_state(cstate, open_stp, lock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6725) &lock_stp, &new);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6726) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6727) status = nfs4_preprocess_seqid_op(cstate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6728) lock->lk_old_lock_seqid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6729) &lock->lk_old_lock_stateid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6730) NFS4_LOCK_STID, &lock_stp, nn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6731) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6732) if (status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6733) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6734) lock_sop = lockowner(lock_stp->st_stateowner);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6735)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6736) lkflg = setlkflg(lock->lk_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6737) status = nfs4_check_openmode(lock_stp, lkflg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6738) if (status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6739) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6740)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6741) status = nfserr_grace;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6742) if (locks_in_grace(net) && !lock->lk_reclaim)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6743) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6744) status = nfserr_no_grace;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6745) if (!locks_in_grace(net) && lock->lk_reclaim)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6746) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6747)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6748) fp = lock_stp->st_stid.sc_file;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6749) switch (lock->lk_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6750) case NFS4_READW_LT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6751) if (nfsd4_has_session(cstate))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6752) fl_flags |= FL_SLEEP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6753) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6754) case NFS4_READ_LT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6755) spin_lock(&fp->fi_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6756) nf = find_readable_file_locked(fp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6757) if (nf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6758) get_lock_access(lock_stp, NFS4_SHARE_ACCESS_READ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6759) spin_unlock(&fp->fi_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6760) fl_type = F_RDLCK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6761) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6762) case NFS4_WRITEW_LT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6763) if (nfsd4_has_session(cstate))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6764) fl_flags |= FL_SLEEP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6765) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6766) case NFS4_WRITE_LT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6767) spin_lock(&fp->fi_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6768) nf = find_writeable_file_locked(fp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6769) if (nf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6770) get_lock_access(lock_stp, NFS4_SHARE_ACCESS_WRITE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6771) spin_unlock(&fp->fi_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6772) fl_type = F_WRLCK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6773) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6774) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6775) status = nfserr_inval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6776) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6777) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6778)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6779) if (!nf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6780) status = nfserr_openmode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6781) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6782) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6783)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6784) nbl = find_or_allocate_block(lock_sop, &fp->fi_fhandle, nn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6785) if (!nbl) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6786) dprintk("NFSD: %s: unable to allocate block!\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6787) status = nfserr_jukebox;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6788) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6789) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6790)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6791) file_lock = &nbl->nbl_lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6792) file_lock->fl_type = fl_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6793) file_lock->fl_owner = (fl_owner_t)lockowner(nfs4_get_stateowner(&lock_sop->lo_owner));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6794) file_lock->fl_pid = current->tgid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6795) file_lock->fl_file = nf->nf_file;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6796) file_lock->fl_flags = fl_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6797) file_lock->fl_lmops = &nfsd_posix_mng_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6798) file_lock->fl_start = lock->lk_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6799) file_lock->fl_end = last_byte_offset(lock->lk_offset, lock->lk_length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6800) nfs4_transform_lock_offset(file_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6801)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6802) conflock = locks_alloc_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6803) if (!conflock) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6804) dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6805) status = nfserr_jukebox;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6806) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6807) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6808)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6809) if (fl_flags & FL_SLEEP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6810) nbl->nbl_time = ktime_get_boottime_seconds();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6811) spin_lock(&nn->blocked_locks_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6812) list_add_tail(&nbl->nbl_list, &lock_sop->lo_blocked);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6813) list_add_tail(&nbl->nbl_lru, &nn->blocked_locks_lru);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6814) spin_unlock(&nn->blocked_locks_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6815) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6816)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6817) err = vfs_lock_file(nf->nf_file, F_SETLK, file_lock, conflock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6818) switch (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6819) case 0: /* success! */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6820) nfs4_inc_and_copy_stateid(&lock->lk_resp_stateid, &lock_stp->st_stid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6821) status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6822) if (lock->lk_reclaim)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6823) nn->somebody_reclaimed = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6824) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6825) case FILE_LOCK_DEFERRED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6826) nbl = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6827) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6828) case -EAGAIN: /* conflock holds conflicting lock */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6829) status = nfserr_denied;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6830) dprintk("NFSD: nfsd4_lock: conflicting lock found!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6831) nfs4_set_lock_denied(conflock, &lock->lk_denied);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6832) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6833) case -EDEADLK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6834) status = nfserr_deadlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6835) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6836) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6837) dprintk("NFSD: nfsd4_lock: vfs_lock_file() failed! status %d\n",err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6838) status = nfserrno(err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6839) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6840) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6841) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6842) if (nbl) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6843) /* dequeue it if we queued it before */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6844) if (fl_flags & FL_SLEEP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6845) spin_lock(&nn->blocked_locks_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6846) list_del_init(&nbl->nbl_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6847) list_del_init(&nbl->nbl_lru);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6848) spin_unlock(&nn->blocked_locks_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6849) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6850) free_blocked_lock(nbl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6851) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6852) if (nf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6853) nfsd_file_put(nf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6854) if (lock_stp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6855) /* Bump seqid manually if the 4.0 replay owner is openowner */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6856) if (cstate->replay_owner &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6857) cstate->replay_owner != &lock_sop->lo_owner &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6858) seqid_mutating_err(ntohl(status)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6859) lock_sop->lo_owner.so_seqid++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6860)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6861) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6862) * If this is a new, never-before-used stateid, and we are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6863) * returning an error, then just go ahead and release it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6864) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6865) if (status && new)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6866) release_lock_stateid(lock_stp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6867)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6868) mutex_unlock(&lock_stp->st_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6869)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6870) nfs4_put_stid(&lock_stp->st_stid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6871) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6872) if (open_stp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6873) nfs4_put_stid(&open_stp->st_stid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6874) nfsd4_bump_seqid(cstate, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6875) if (conflock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6876) locks_free_lock(conflock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6877) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6878) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6879)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6880) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6881) * The NFSv4 spec allows a client to do a LOCKT without holding an OPEN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6882) * so we do a temporary open here just to get an open file to pass to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6883) * vfs_test_lock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6884) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6885) static __be32 nfsd_test_lock(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file_lock *lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6886) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6887) struct nfsd_file *nf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6888) __be32 err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6889)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6890) err = nfsd_file_acquire(rqstp, fhp, NFSD_MAY_READ, &nf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6891) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6892) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6893) fh_lock(fhp); /* to block new leases till after test_lock: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6894) err = nfserrno(nfsd_open_break_lease(fhp->fh_dentry->d_inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6895) NFSD_MAY_READ));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6896) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6897) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6898) lock->fl_file = nf->nf_file;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6899) err = nfserrno(vfs_test_lock(nf->nf_file, lock));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6900) lock->fl_file = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6901) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6902) fh_unlock(fhp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6903) nfsd_file_put(nf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6904) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6905) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6906)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6907) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6908) * LOCKT operation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6909) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6910) __be32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6911) nfsd4_lockt(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6912) union nfsd4_op_u *u)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6913) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6914) struct nfsd4_lockt *lockt = &u->lockt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6915) struct file_lock *file_lock = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6916) struct nfs4_lockowner *lo = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6917) __be32 status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6918) struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6919)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6920) if (locks_in_grace(SVC_NET(rqstp)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6921) return nfserr_grace;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6922)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6923) if (check_lock_length(lockt->lt_offset, lockt->lt_length))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6924) return nfserr_inval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6925)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6926) if (!nfsd4_has_session(cstate)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6927) status = lookup_clientid(&lockt->lt_clientid, cstate, nn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6928) false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6929) if (status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6930) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6931) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6932)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6933) if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6934) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6935)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6936) file_lock = locks_alloc_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6937) if (!file_lock) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6938) dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6939) status = nfserr_jukebox;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6940) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6941) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6942)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6943) switch (lockt->lt_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6944) case NFS4_READ_LT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6945) case NFS4_READW_LT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6946) file_lock->fl_type = F_RDLCK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6947) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6948) case NFS4_WRITE_LT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6949) case NFS4_WRITEW_LT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6950) file_lock->fl_type = F_WRLCK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6951) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6952) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6953) dprintk("NFSD: nfs4_lockt: bad lock type!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6954) status = nfserr_inval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6955) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6956) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6957)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6958) lo = find_lockowner_str(cstate->clp, &lockt->lt_owner);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6959) if (lo)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6960) file_lock->fl_owner = (fl_owner_t)lo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6961) file_lock->fl_pid = current->tgid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6962) file_lock->fl_flags = FL_POSIX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6963)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6964) file_lock->fl_start = lockt->lt_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6965) file_lock->fl_end = last_byte_offset(lockt->lt_offset, lockt->lt_length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6966)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6967) nfs4_transform_lock_offset(file_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6968)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6969) status = nfsd_test_lock(rqstp, &cstate->current_fh, file_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6970) if (status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6971) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6972)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6973) if (file_lock->fl_type != F_UNLCK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6974) status = nfserr_denied;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6975) nfs4_set_lock_denied(file_lock, &lockt->lt_denied);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6976) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6977) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6978) if (lo)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6979) nfs4_put_stateowner(&lo->lo_owner);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6980) if (file_lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6981) locks_free_lock(file_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6982) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6983) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6984)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6985) __be32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6986) nfsd4_locku(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6987) union nfsd4_op_u *u)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6988) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6989) struct nfsd4_locku *locku = &u->locku;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6990) struct nfs4_ol_stateid *stp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6991) struct nfsd_file *nf = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6992) struct file_lock *file_lock = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6993) __be32 status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6994) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6995) struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6996)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6997) dprintk("NFSD: nfsd4_locku: start=%Ld length=%Ld\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6998) (long long) locku->lu_offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6999) (long long) locku->lu_length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7001) if (check_lock_length(locku->lu_offset, locku->lu_length))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7002) return nfserr_inval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7003)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7004) status = nfs4_preprocess_seqid_op(cstate, locku->lu_seqid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7005) &locku->lu_stateid, NFS4_LOCK_STID,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7006) &stp, nn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7007) if (status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7008) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7009) nf = find_any_file(stp->st_stid.sc_file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7010) if (!nf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7011) status = nfserr_lock_range;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7012) goto put_stateid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7013) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7014) file_lock = locks_alloc_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7015) if (!file_lock) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7016) dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7017) status = nfserr_jukebox;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7018) goto put_file;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7019) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7020)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7021) file_lock->fl_type = F_UNLCK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7022) file_lock->fl_owner = (fl_owner_t)lockowner(nfs4_get_stateowner(stp->st_stateowner));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7023) file_lock->fl_pid = current->tgid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7024) file_lock->fl_file = nf->nf_file;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7025) file_lock->fl_flags = FL_POSIX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7026) file_lock->fl_lmops = &nfsd_posix_mng_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7027) file_lock->fl_start = locku->lu_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7028)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7029) file_lock->fl_end = last_byte_offset(locku->lu_offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7030) locku->lu_length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7031) nfs4_transform_lock_offset(file_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7032)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7033) err = vfs_lock_file(nf->nf_file, F_SETLK, file_lock, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7034) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7035) dprintk("NFSD: nfs4_locku: vfs_lock_file failed!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7036) goto out_nfserr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7037) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7038) nfs4_inc_and_copy_stateid(&locku->lu_stateid, &stp->st_stid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7039) put_file:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7040) nfsd_file_put(nf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7041) put_stateid:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7042) mutex_unlock(&stp->st_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7043) nfs4_put_stid(&stp->st_stid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7044) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7045) nfsd4_bump_seqid(cstate, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7046) if (file_lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7047) locks_free_lock(file_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7048) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7049)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7050) out_nfserr:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7051) status = nfserrno(err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7052) goto put_file;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7053) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7054)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7055) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7056) * returns
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7057) * true: locks held by lockowner
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7058) * false: no locks held by lockowner
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7059) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7060) static bool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7061) check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7062) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7063) struct file_lock *fl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7064) int status = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7065) struct nfsd_file *nf = find_any_file(fp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7066) struct inode *inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7067) struct file_lock_context *flctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7068)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7069) if (!nf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7070) /* Any valid lock stateid should have some sort of access */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7071) WARN_ON_ONCE(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7072) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7073) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7074)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7075) inode = locks_inode(nf->nf_file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7076) flctx = inode->i_flctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7077)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7078) if (flctx && !list_empty_careful(&flctx->flc_posix)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7079) spin_lock(&flctx->flc_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7080) list_for_each_entry(fl, &flctx->flc_posix, fl_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7081) if (fl->fl_owner == (fl_owner_t)lowner) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7082) status = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7083) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7084) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7085) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7086) spin_unlock(&flctx->flc_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7087) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7088) nfsd_file_put(nf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7089) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7090) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7091)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7092) __be32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7093) nfsd4_release_lockowner(struct svc_rqst *rqstp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7094) struct nfsd4_compound_state *cstate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7095) union nfsd4_op_u *u)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7096) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7097) struct nfsd4_release_lockowner *rlockowner = &u->release_lockowner;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7098) clientid_t *clid = &rlockowner->rl_clientid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7099) struct nfs4_stateowner *sop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7100) struct nfs4_lockowner *lo = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7101) struct nfs4_ol_stateid *stp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7102) struct xdr_netobj *owner = &rlockowner->rl_owner;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7103) unsigned int hashval = ownerstr_hashval(owner);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7104) __be32 status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7105) struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7106) struct nfs4_client *clp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7107) LIST_HEAD (reaplist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7108)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7109) dprintk("nfsd4_release_lockowner clientid: (%08x/%08x):\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7110) clid->cl_boot, clid->cl_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7111)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7112) status = lookup_clientid(clid, cstate, nn, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7113) if (status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7114) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7115)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7116) clp = cstate->clp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7117) /* Find the matching lock stateowner */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7118) spin_lock(&clp->cl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7119) list_for_each_entry(sop, &clp->cl_ownerstr_hashtbl[hashval],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7120) so_strhash) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7121)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7122) if (sop->so_is_open_owner || !same_owner_str(sop, owner))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7123) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7124)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7125) /* see if there are still any locks associated with it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7126) lo = lockowner(sop);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7127) list_for_each_entry(stp, &sop->so_stateids, st_perstateowner) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7128) if (check_for_locks(stp->st_stid.sc_file, lo)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7129) status = nfserr_locks_held;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7130) spin_unlock(&clp->cl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7131) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7132) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7133) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7134)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7135) nfs4_get_stateowner(sop);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7136) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7137) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7138) if (!lo) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7139) spin_unlock(&clp->cl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7140) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7141) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7142)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7143) unhash_lockowner_locked(lo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7144) while (!list_empty(&lo->lo_owner.so_stateids)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7145) stp = list_first_entry(&lo->lo_owner.so_stateids,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7146) struct nfs4_ol_stateid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7147) st_perstateowner);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7148) WARN_ON(!unhash_lock_stateid(stp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7149) put_ol_stateid_locked(stp, &reaplist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7150) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7151) spin_unlock(&clp->cl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7152) free_ol_stateid_reaplist(&reaplist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7153) remove_blocked_locks(lo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7154) nfs4_put_stateowner(&lo->lo_owner);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7155)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7156) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7157) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7158)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7159) static inline struct nfs4_client_reclaim *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7160) alloc_reclaim(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7161) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7162) return kmalloc(sizeof(struct nfs4_client_reclaim), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7163) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7164)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7165) bool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7166) nfs4_has_reclaimed_state(struct xdr_netobj name, struct nfsd_net *nn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7167) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7168) struct nfs4_client_reclaim *crp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7169)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7170) crp = nfsd4_find_reclaim_client(name, nn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7171) return (crp && crp->cr_clp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7172) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7173)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7174) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7175) * failure => all reset bets are off, nfserr_no_grace...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7176) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7177) * The caller is responsible for freeing name.data if NULL is returned (it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7178) * will be freed in nfs4_remove_reclaim_record in the normal case).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7179) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7180) struct nfs4_client_reclaim *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7181) nfs4_client_to_reclaim(struct xdr_netobj name, struct xdr_netobj princhash,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7182) struct nfsd_net *nn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7183) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7184) unsigned int strhashval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7185) struct nfs4_client_reclaim *crp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7186)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7187) crp = alloc_reclaim();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7188) if (crp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7189) strhashval = clientstr_hashval(name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7190) INIT_LIST_HEAD(&crp->cr_strhash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7191) list_add(&crp->cr_strhash, &nn->reclaim_str_hashtbl[strhashval]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7192) crp->cr_name.data = name.data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7193) crp->cr_name.len = name.len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7194) crp->cr_princhash.data = princhash.data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7195) crp->cr_princhash.len = princhash.len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7196) crp->cr_clp = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7197) nn->reclaim_str_hashtbl_size++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7198) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7199) return crp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7200) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7201)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7202) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7203) nfs4_remove_reclaim_record(struct nfs4_client_reclaim *crp, struct nfsd_net *nn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7204) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7205) list_del(&crp->cr_strhash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7206) kfree(crp->cr_name.data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7207) kfree(crp->cr_princhash.data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7208) kfree(crp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7209) nn->reclaim_str_hashtbl_size--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7210) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7211)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7212) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7213) nfs4_release_reclaim(struct nfsd_net *nn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7214) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7215) struct nfs4_client_reclaim *crp = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7216) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7217)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7218) for (i = 0; i < CLIENT_HASH_SIZE; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7219) while (!list_empty(&nn->reclaim_str_hashtbl[i])) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7220) crp = list_entry(nn->reclaim_str_hashtbl[i].next,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7221) struct nfs4_client_reclaim, cr_strhash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7222) nfs4_remove_reclaim_record(crp, nn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7223) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7224) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7225) WARN_ON_ONCE(nn->reclaim_str_hashtbl_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7226) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7227)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7228) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7229) * called from OPEN, CLAIM_PREVIOUS with a new clientid. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7230) struct nfs4_client_reclaim *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7231) nfsd4_find_reclaim_client(struct xdr_netobj name, struct nfsd_net *nn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7232) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7233) unsigned int strhashval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7234) struct nfs4_client_reclaim *crp = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7235)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7236) strhashval = clientstr_hashval(name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7237) list_for_each_entry(crp, &nn->reclaim_str_hashtbl[strhashval], cr_strhash) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7238) if (compare_blob(&crp->cr_name, &name) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7239) return crp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7240) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7241) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7242) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7243) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7244)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7245) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7246) * Called from OPEN. Look for clientid in reclaim list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7247) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7248) __be32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7249) nfs4_check_open_reclaim(clientid_t *clid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7250) struct nfsd4_compound_state *cstate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7251) struct nfsd_net *nn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7252) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7253) __be32 status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7254)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7255) /* find clientid in conf_id_hashtbl */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7256) status = lookup_clientid(clid, cstate, nn, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7257) if (status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7258) return nfserr_reclaim_bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7259)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7260) if (test_bit(NFSD4_CLIENT_RECLAIM_COMPLETE, &cstate->clp->cl_flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7261) return nfserr_no_grace;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7262)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7263) if (nfsd4_client_record_check(cstate->clp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7264) return nfserr_reclaim_bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7265)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7266) return nfs_ok;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7267) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7268)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7269) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7270) * Since the lifetime of a delegation isn't limited to that of an open, a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7271) * client may quite reasonably hang on to a delegation as long as it has
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7272) * the inode cached. This becomes an obvious problem the first time a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7273) * client's inode cache approaches the size of the server's total memory.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7274) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7275) * For now we avoid this problem by imposing a hard limit on the number
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7276) * of delegations, which varies according to the server's memory size.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7277) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7278) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7279) set_max_delegations(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7280) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7281) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7282) * Allow at most 4 delegations per megabyte of RAM. Quick
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7283) * estimates suggest that in the worst case (where every delegation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7284) * is for a different inode), a delegation could take about 1.5K,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7285) * giving a worst case usage of about 6% of memory.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7286) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7287) max_delegations = nr_free_buffer_pages() >> (20 - 2 - PAGE_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7288) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7289)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7290) static int nfs4_state_create_net(struct net *net)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7291) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7292) struct nfsd_net *nn = net_generic(net, nfsd_net_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7293) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7294)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7295) nn->conf_id_hashtbl = kmalloc_array(CLIENT_HASH_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7296) sizeof(struct list_head),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7297) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7298) if (!nn->conf_id_hashtbl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7299) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7300) nn->unconf_id_hashtbl = kmalloc_array(CLIENT_HASH_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7301) sizeof(struct list_head),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7302) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7303) if (!nn->unconf_id_hashtbl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7304) goto err_unconf_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7305) nn->sessionid_hashtbl = kmalloc_array(SESSION_HASH_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7306) sizeof(struct list_head),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7307) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7308) if (!nn->sessionid_hashtbl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7309) goto err_sessionid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7310)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7311) for (i = 0; i < CLIENT_HASH_SIZE; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7312) INIT_LIST_HEAD(&nn->conf_id_hashtbl[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7313) INIT_LIST_HEAD(&nn->unconf_id_hashtbl[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7314) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7315) for (i = 0; i < SESSION_HASH_SIZE; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7316) INIT_LIST_HEAD(&nn->sessionid_hashtbl[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7317) nn->conf_name_tree = RB_ROOT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7318) nn->unconf_name_tree = RB_ROOT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7319) nn->boot_time = ktime_get_real_seconds();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7320) nn->grace_ended = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7321) nn->nfsd4_manager.block_opens = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7322) INIT_LIST_HEAD(&nn->nfsd4_manager.list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7323) INIT_LIST_HEAD(&nn->client_lru);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7324) INIT_LIST_HEAD(&nn->close_lru);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7325) INIT_LIST_HEAD(&nn->del_recall_lru);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7326) spin_lock_init(&nn->client_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7327) spin_lock_init(&nn->s2s_cp_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7328) idr_init(&nn->s2s_cp_stateids);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7329)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7330) spin_lock_init(&nn->blocked_locks_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7331) INIT_LIST_HEAD(&nn->blocked_locks_lru);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7332)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7333) INIT_DELAYED_WORK(&nn->laundromat_work, laundromat_main);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7334) get_net(net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7335)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7336) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7337)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7338) err_sessionid:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7339) kfree(nn->unconf_id_hashtbl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7340) err_unconf_id:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7341) kfree(nn->conf_id_hashtbl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7342) err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7343) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7344) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7345)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7346) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7347) nfs4_state_destroy_net(struct net *net)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7348) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7349) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7350) struct nfs4_client *clp = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7351) struct nfsd_net *nn = net_generic(net, nfsd_net_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7352)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7353) for (i = 0; i < CLIENT_HASH_SIZE; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7354) while (!list_empty(&nn->conf_id_hashtbl[i])) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7355) clp = list_entry(nn->conf_id_hashtbl[i].next, struct nfs4_client, cl_idhash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7356) destroy_client(clp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7357) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7358) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7359)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7360) WARN_ON(!list_empty(&nn->blocked_locks_lru));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7361)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7362) for (i = 0; i < CLIENT_HASH_SIZE; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7363) while (!list_empty(&nn->unconf_id_hashtbl[i])) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7364) clp = list_entry(nn->unconf_id_hashtbl[i].next, struct nfs4_client, cl_idhash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7365) destroy_client(clp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7366) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7367) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7368)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7369) kfree(nn->sessionid_hashtbl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7370) kfree(nn->unconf_id_hashtbl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7371) kfree(nn->conf_id_hashtbl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7372) put_net(net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7373) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7374)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7375) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7376) nfs4_state_start_net(struct net *net)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7377) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7378) struct nfsd_net *nn = net_generic(net, nfsd_net_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7379) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7380)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7381) ret = get_nfsdfs(net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7382) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7383) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7384) ret = nfs4_state_create_net(net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7385) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7386) mntput(nn->nfsd_mnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7387) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7388) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7389) locks_start_grace(net, &nn->nfsd4_manager);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7390) nfsd4_client_tracking_init(net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7391) if (nn->track_reclaim_completes && nn->reclaim_str_hashtbl_size == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7392) goto skip_grace;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7393) printk(KERN_INFO "NFSD: starting %lld-second grace period (net %x)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7394) nn->nfsd4_grace, net->ns.inum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7395) trace_nfsd_grace_start(nn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7396) queue_delayed_work(laundry_wq, &nn->laundromat_work, nn->nfsd4_grace * HZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7397) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7398)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7399) skip_grace:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7400) printk(KERN_INFO "NFSD: no clients to reclaim, skipping NFSv4 grace period (net %x)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7401) net->ns.inum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7402) queue_delayed_work(laundry_wq, &nn->laundromat_work, nn->nfsd4_lease * HZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7403) nfsd4_end_grace(nn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7404) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7405) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7406)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7407) /* initialization to perform when the nfsd service is started: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7408)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7409) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7410) nfs4_state_start(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7411) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7412) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7413)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7414) laundry_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, "nfsd4");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7415) if (laundry_wq == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7416) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7417) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7418) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7419) ret = nfsd4_create_callback_queue();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7420) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7421) goto out_free_laundry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7422)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7423) set_max_delegations();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7424) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7425)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7426) out_free_laundry:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7427) destroy_workqueue(laundry_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7428) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7429) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7430) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7431)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7432) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7433) nfs4_state_shutdown_net(struct net *net)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7434) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7435) struct nfs4_delegation *dp = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7436) struct list_head *pos, *next, reaplist;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7437) struct nfsd_net *nn = net_generic(net, nfsd_net_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7438)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7439) cancel_delayed_work_sync(&nn->laundromat_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7440) locks_end_grace(&nn->nfsd4_manager);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7441)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7442) INIT_LIST_HEAD(&reaplist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7443) spin_lock(&state_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7444) list_for_each_safe(pos, next, &nn->del_recall_lru) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7445) dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7446) WARN_ON(!unhash_delegation_locked(dp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7447) list_add(&dp->dl_recall_lru, &reaplist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7448) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7449) spin_unlock(&state_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7450) list_for_each_safe(pos, next, &reaplist) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7451) dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7452) list_del_init(&dp->dl_recall_lru);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7453) destroy_unhashed_deleg(dp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7454) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7455)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7456) nfsd4_client_tracking_exit(net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7457) nfs4_state_destroy_net(net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7458) mntput(nn->nfsd_mnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7459) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7460)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7461) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7462) nfs4_state_shutdown(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7463) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7464) destroy_workqueue(laundry_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7465) nfsd4_destroy_callback_queue();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7466) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7467)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7468) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7469) get_stateid(struct nfsd4_compound_state *cstate, stateid_t *stateid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7470) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7471) if (HAS_CSTATE_FLAG(cstate, CURRENT_STATE_ID_FLAG) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7472) CURRENT_STATEID(stateid))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7473) memcpy(stateid, &cstate->current_stateid, sizeof(stateid_t));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7474) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7475)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7476) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7477) put_stateid(struct nfsd4_compound_state *cstate, stateid_t *stateid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7478) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7479) if (cstate->minorversion) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7480) memcpy(&cstate->current_stateid, stateid, sizeof(stateid_t));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7481) SET_CSTATE_FLAG(cstate, CURRENT_STATE_ID_FLAG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7482) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7483) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7484)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7485) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7486) clear_current_stateid(struct nfsd4_compound_state *cstate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7487) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7488) CLEAR_CSTATE_FLAG(cstate, CURRENT_STATE_ID_FLAG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7489) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7490)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7491) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7492) * functions to set current state id
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7493) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7494) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7495) nfsd4_set_opendowngradestateid(struct nfsd4_compound_state *cstate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7496) union nfsd4_op_u *u)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7497) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7498) put_stateid(cstate, &u->open_downgrade.od_stateid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7499) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7500)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7501) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7502) nfsd4_set_openstateid(struct nfsd4_compound_state *cstate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7503) union nfsd4_op_u *u)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7504) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7505) put_stateid(cstate, &u->open.op_stateid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7506) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7507)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7508) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7509) nfsd4_set_closestateid(struct nfsd4_compound_state *cstate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7510) union nfsd4_op_u *u)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7511) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7512) put_stateid(cstate, &u->close.cl_stateid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7513) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7514)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7515) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7516) nfsd4_set_lockstateid(struct nfsd4_compound_state *cstate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7517) union nfsd4_op_u *u)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7518) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7519) put_stateid(cstate, &u->lock.lk_resp_stateid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7520) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7521)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7522) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7523) * functions to consume current state id
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7524) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7525)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7526) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7527) nfsd4_get_opendowngradestateid(struct nfsd4_compound_state *cstate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7528) union nfsd4_op_u *u)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7529) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7530) get_stateid(cstate, &u->open_downgrade.od_stateid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7531) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7532)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7533) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7534) nfsd4_get_delegreturnstateid(struct nfsd4_compound_state *cstate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7535) union nfsd4_op_u *u)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7536) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7537) get_stateid(cstate, &u->delegreturn.dr_stateid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7538) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7539)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7540) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7541) nfsd4_get_freestateid(struct nfsd4_compound_state *cstate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7542) union nfsd4_op_u *u)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7543) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7544) get_stateid(cstate, &u->free_stateid.fr_stateid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7545) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7546)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7547) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7548) nfsd4_get_setattrstateid(struct nfsd4_compound_state *cstate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7549) union nfsd4_op_u *u)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7550) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7551) get_stateid(cstate, &u->setattr.sa_stateid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7552) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7553)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7554) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7555) nfsd4_get_closestateid(struct nfsd4_compound_state *cstate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7556) union nfsd4_op_u *u)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7557) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7558) get_stateid(cstate, &u->close.cl_stateid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7559) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7560)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7561) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7562) nfsd4_get_lockustateid(struct nfsd4_compound_state *cstate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7563) union nfsd4_op_u *u)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7564) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7565) get_stateid(cstate, &u->locku.lu_stateid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7566) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7567)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7568) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7569) nfsd4_get_readstateid(struct nfsd4_compound_state *cstate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7570) union nfsd4_op_u *u)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7571) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7572) get_stateid(cstate, &u->read.rd_stateid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7573) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7574)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7575) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7576) nfsd4_get_writestateid(struct nfsd4_compound_state *cstate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7577) union nfsd4_op_u *u)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7578) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7579) get_stateid(cstate, &u->write.wr_stateid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7580) }