/*
 * fs/nfs/nfs4state.c
 *
 * Client-side XDR for NFSv4.
 *
 * Copyright (c) 2002 The Regents of the University of Michigan.
 * All rights reserved.
 *
 * Kendrick Smith <kmsmith@umich.edu>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Implementation of the NFSv4 state model. For the time being,
 * this is minimal, but will be made much more complex in a
 * subsequent patch.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/nfs_fs.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/ratelimit.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>
#include <linux/jiffies.h>

#include <linux/sunrpc/clnt.h>

#include "nfs4_fs.h"
#include "callback.h"
#include "delegation.h"
#include "internal.h"
#include "nfs4idmap.h"
#include "nfs4session.h"
#include "pnfs.h"
#include "netns.h"
#include "nfs4trace.h"

#define NFSDBG_FACILITY		NFSDBG_STATE

#define OPENOWNER_POOL_SIZE	8

const nfs4_stateid zero_stateid = {
	{ .data = { 0 } },
	.type = NFS4_SPECIAL_STATEID_TYPE,
};
const nfs4_stateid invalid_stateid = {
	{
		/* Funky initialiser keeps older gcc versions happy */
		.data = { 0xff, 0xff, 0xff, 0xff, 0 },
	},
	.type = NFS4_INVALID_STATEID_TYPE,
};

const nfs4_stateid current_stateid = {
	{
		/* Funky initialiser keeps older gcc versions happy */
		.data = { 0x0, 0x0, 0x0, 0x1, 0 },
	},
	.type = NFS4_SPECIAL_STATEID_TYPE,
};

static DEFINE_MUTEX(nfs_clid_init_mutex);

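/*
 * Schedule lease renewal for this client.  If the lease time still
 * needs to be checked, first query it from the server and update the
 * local lease period before arming the renewal timer.
 */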
static int nfs4_setup_state_renewal(struct nfs_client *clp)
{
	int status;
	struct nfs_fsinfo fsinfo;

	if (!test_bit(NFS_CS_CHECK_LEASE_TIME, &clp->cl_res_state)) {
		nfs4_schedule_state_renewal(clp);
		return 0;
	}

	status = nfs4_proc_get_lease_time(clp, &fsinfo);
	if (status == 0) {
		nfs4_set_lease_period(clp, fsinfo.lease_time * HZ);
		nfs4_schedule_state_renewal(clp);
	}

	return status;
}

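/*
 * Establish and confirm the lease for an NFSv4.0 client: send
 * SETCLIENTID (unless a previous attempt is already awaiting
 * confirmation), record the clientid and confirmation verifier,
 * then send SETCLIENTID_CONFIRM and start lease renewal.
 */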
int nfs4_init_clientid(struct nfs_client *clp, const struct cred *cred)
{
	struct nfs4_setclientid_res clid = {
		.clientid = clp->cl_clientid,
		.confirm = clp->cl_confirm,
	};
	unsigned short port;
	int status;
	struct nfs_net *nn = net_generic(clp->cl_net, nfs_net_id);

	if (test_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state))
		goto do_confirm;
	port = nn->nfs_callback_tcpport;
	if (clp->cl_addr.ss_family == AF_INET6)
		port = nn->nfs_callback_tcpport6;

	status = nfs4_proc_setclientid(clp, NFS4_CALLBACK, port, cred, &clid);
	if (status != 0)
		goto out;
	clp->cl_clientid = clid.clientid;
	clp->cl_confirm = clid.confirm;
	set_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state);
do_confirm:
	status = nfs4_proc_setclientid_confirm(clp, &clid, cred);
	if (status != 0)
		goto out;
	clear_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state);
	nfs4_setup_state_renewal(clp);
out:
	return status;
}

/**
 * nfs40_discover_server_trunking - Detect server IP address trunking (mv0)
 *
 * @clp: nfs_client under test
 * @result: OUT: found nfs_client, or clp
 * @cred: credential to use for trunking test
 *
 * Returns zero, a negative errno, or a negative NFS4ERR status.
 * If zero is returned, an nfs_client pointer is planted in
 * "result".
 *
 * Note: The returned client may not yet be marked ready.
 */
int nfs40_discover_server_trunking(struct nfs_client *clp,
				   struct nfs_client **result,
				   const struct cred *cred)
{
	struct nfs4_setclientid_res clid = {
		.clientid = clp->cl_clientid,
		.confirm = clp->cl_confirm,
	};
	struct nfs_net *nn = net_generic(clp->cl_net, nfs_net_id);
	unsigned short port;
	int status;

	port = nn->nfs_callback_tcpport;
	if (clp->cl_addr.ss_family == AF_INET6)
		port = nn->nfs_callback_tcpport6;

	status = nfs4_proc_setclientid(clp, NFS4_CALLBACK, port, cred, &clid);
	if (status != 0)
		goto out;
	clp->cl_clientid = clid.clientid;
	clp->cl_confirm = clid.confirm;

	status = nfs40_walk_client_list(clp, result, cred);
	if (status == 0) {
		/* Sustain the lease, even if it's empty. If the clientid4
		 * goes stale it's of no use for trunking discovery. */
		nfs4_schedule_state_renewal(*result);

		/* If the client state needs to recover, do it. */
		if (clp->cl_state)
			nfs4_schedule_state_manager(clp);
	}
out:
	return status;
}

const struct cred *nfs4_get_machine_cred(struct nfs_client *clp)
{
	return get_cred(rpc_machine_cred());
}

static void nfs4_root_machine_cred(struct nfs_client *clp)
{

	/* Force root creds instead of machine */
	clp->cl_principal = NULL;
	clp->cl_rpcclient->cl_principal = NULL;
}

static const struct cred *
nfs4_get_renew_cred_server_locked(struct nfs_server *server)
{
	const struct cred *cred = NULL;
	struct nfs4_state_owner *sp;
	struct rb_node *pos;

	for (pos = rb_first(&server->state_owners);
	     pos != NULL;
	     pos = rb_next(pos)) {
		sp = rb_entry(pos, struct nfs4_state_owner, so_server_node);
		if (list_empty(&sp->so_states))
			continue;
		cred = get_cred(sp->so_cred);
		break;
	}
	return cred;
}

/**
 * nfs4_get_renew_cred - Acquire credential for a renew operation
 * @clp: client state handle
 *
 * Returns an rpc_cred with reference count bumped, or NULL.
 * The clp->cl_lock is taken internally while scanning the per-server
 * state owners, so the caller must not hold it.
 */
const struct cred *nfs4_get_renew_cred(struct nfs_client *clp)
{
	const struct cred *cred = NULL;
	struct nfs_server *server;

	/* Use machine credentials if available */
	cred = nfs4_get_machine_cred(clp);
	if (cred != NULL)
		goto out;

	spin_lock(&clp->cl_lock);
	rcu_read_lock();
	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
		cred = nfs4_get_renew_cred_server_locked(server);
		if (cred != NULL)
			break;
	}
	rcu_read_unlock();
	spin_unlock(&clp->cl_lock);

out:
	return cred;
}

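/*
 * Stop draining a slot table: clear the DRAINING flag and wake up any
 * requests queued waiting for the drain to complete.
 */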
static void nfs4_end_drain_slot_table(struct nfs4_slot_table *tbl)
{
	if (test_and_clear_bit(NFS4_SLOT_TBL_DRAINING, &tbl->slot_tbl_state)) {
		spin_lock(&tbl->slot_tbl_lock);
		nfs41_wake_slot_table(tbl);
		spin_unlock(&tbl->slot_tbl_lock);
	}
}

static void nfs4_end_drain_session(struct nfs_client *clp)
{
	struct nfs4_session *ses = clp->cl_session;

	if (clp->cl_slot_tbl) {
		nfs4_end_drain_slot_table(clp->cl_slot_tbl);
		return;
	}

	if (ses != NULL) {
		nfs4_end_drain_slot_table(&ses->bc_slot_table);
		nfs4_end_drain_slot_table(&ses->fc_slot_table);
	}
}

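/*
 * Mark a slot table as draining and, if any slots are still in use,
 * wait (interruptibly) until the last outstanding slot is released.
 */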
static int nfs4_drain_slot_tbl(struct nfs4_slot_table *tbl)
{
	set_bit(NFS4_SLOT_TBL_DRAINING, &tbl->slot_tbl_state);
	spin_lock(&tbl->slot_tbl_lock);
	if (tbl->highest_used_slotid != NFS4_NO_SLOT) {
		reinit_completion(&tbl->complete);
		spin_unlock(&tbl->slot_tbl_lock);
		return wait_for_completion_interruptible(&tbl->complete);
	}
	spin_unlock(&tbl->slot_tbl_lock);
	return 0;
}

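/*
 * Quiesce the session so that no new requests can claim slots while
 * the state manager recovers: drain the back channel, then the fore
 * channel.  Clients using a standalone slot table drain that instead.
 */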
static int nfs4_begin_drain_session(struct nfs_client *clp)
{
	struct nfs4_session *ses = clp->cl_session;
	int ret;

	if (clp->cl_slot_tbl)
		return nfs4_drain_slot_tbl(clp->cl_slot_tbl);

	/* back channel */
	ret = nfs4_drain_slot_tbl(&ses->bc_slot_table);
	if (ret)
		return ret;
	/* fore channel */
	return nfs4_drain_slot_tbl(&ses->fc_slot_table);
}

#if defined(CONFIG_NFS_V4_1)

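/*
 * The session was (re)established: clear the lease-confirmation and
 * session-reset flags and restart lease renewal.
 */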
static void nfs41_finish_session_reset(struct nfs_client *clp)
{
	clear_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state);
	clear_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state);
	/* create_session negotiated new slot table */
	clear_bit(NFS4CLNT_BIND_CONN_TO_SESSION, &clp->cl_state);
	nfs4_setup_state_renewal(clp);
}

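/*
 * Establish the lease for an NFSv4.1+ client: send EXCHANGE_ID to
 * obtain a client ID (unless one is already awaiting confirmation),
 * then CREATE_SESSION to confirm it, and mark the client ready.
 */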
int nfs41_init_clientid(struct nfs_client *clp, const struct cred *cred)
{
	int status;

	if (test_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state))
		goto do_confirm;
	status = nfs4_proc_exchange_id(clp, cred);
	if (status != 0)
		goto out;
	set_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state);
do_confirm:
	status = nfs4_proc_create_session(clp, cred);
	if (status != 0)
		goto out;
	nfs41_finish_session_reset(clp);
	nfs_mark_client_ready(clp, NFS_CS_READY);
out:
	return status;
}

/**
 * nfs41_discover_server_trunking - Detect server IP address trunking (mv1)
 *
 * @clp: nfs_client under test
 * @result: OUT: found nfs_client, or clp
 * @cred: credential to use for trunking test
 *
 * Returns NFS4_OK, a negative errno, or a negative NFS4ERR status.
 * If NFS4_OK is returned, an nfs_client pointer is planted in
 * "result".
 *
 * Note: The returned client may not yet be marked ready.
 */
int nfs41_discover_server_trunking(struct nfs_client *clp,
				   struct nfs_client **result,
				   const struct cred *cred)
{
	int status;

	status = nfs4_proc_exchange_id(clp, cred);
	if (status != NFS4_OK)
		return status;

	status = nfs41_walk_client_list(clp, result, cred);
	if (status < 0)
		return status;
	if (clp != *result)
		return 0;

	/*
	 * Purge state if the client id was established in a prior
	 * instance and the client id could not have arrived on the
	 * server via Transparent State Migration.
	 */
	if (clp->cl_exchange_flags & EXCHGID4_FLAG_CONFIRMED_R) {
		if (!test_bit(NFS_CS_TSM_POSSIBLE, &clp->cl_flags))
			set_bit(NFS4CLNT_PURGE_STATE, &clp->cl_state);
		else
			set_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state);
	}
	nfs4_schedule_state_manager(clp);
	status = nfs_wait_client_init_complete(clp);
	if (status < 0)
		nfs_put_client(clp);
	return status;
}

#endif /* CONFIG_NFS_V4_1 */

/**
 * nfs4_get_clid_cred - Acquire credential for a setclientid operation
 * @clp: client state handle
 *
 * Returns a cred with reference count bumped, or NULL.
 */
const struct cred *nfs4_get_clid_cred(struct nfs_client *clp)
{
	const struct cred *cred;

	cred = nfs4_get_machine_cred(clp);
	return cred;
}

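/*
 * Search the server's tree of state owners for one matching @cred.
 * On a match, take the owner off the LRU (it is back in use), bump
 * its reference count and return it.  Caller holds clp->cl_lock.
 */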
static struct nfs4_state_owner *
nfs4_find_state_owner_locked(struct nfs_server *server, const struct cred *cred)
{
	struct rb_node **p = &server->state_owners.rb_node,
		       *parent = NULL;
	struct nfs4_state_owner *sp;
	int cmp;

	while (*p != NULL) {
		parent = *p;
		sp = rb_entry(parent, struct nfs4_state_owner, so_server_node);
		cmp = cred_fscmp(cred, sp->so_cred);

		if (cmp < 0)
			p = &parent->rb_left;
		else if (cmp > 0)
			p = &parent->rb_right;
		else {
			if (!list_empty(&sp->so_lru))
				list_del_init(&sp->so_lru);
			atomic_inc(&sp->so_count);
			return sp;
		}
	}
	return NULL;
}

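/*
 * Insert a newly allocated state owner into the server's tree unless
 * one with the same credential raced in first; in that case return
 * the existing owner (referenced) so the caller can free the new one.
 * Caller holds clp->cl_lock.
 */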
static struct nfs4_state_owner *
nfs4_insert_state_owner_locked(struct nfs4_state_owner *new)
{
	struct nfs_server *server = new->so_server;
	struct rb_node **p = &server->state_owners.rb_node,
		       *parent = NULL;
	struct nfs4_state_owner *sp;
	int cmp;

	while (*p != NULL) {
		parent = *p;
		sp = rb_entry(parent, struct nfs4_state_owner, so_server_node);
		cmp = cred_fscmp(new->so_cred, sp->so_cred);

		if (cmp < 0)
			p = &parent->rb_left;
		else if (cmp > 0)
			p = &parent->rb_right;
		else {
			if (!list_empty(&sp->so_lru))
				list_del_init(&sp->so_lru);
			atomic_inc(&sp->so_count);
			return sp;
		}
	}
	rb_link_node(&new->so_server_node, parent, p);
	rb_insert_color(&new->so_server_node, &server->state_owners);
	return new;
}

static void
nfs4_remove_state_owner_locked(struct nfs4_state_owner *sp)
{
	struct nfs_server *server = sp->so_server;

	if (!RB_EMPTY_NODE(&sp->so_server_node))
		rb_erase(&sp->so_server_node, &server->state_owners);
}

static void
nfs4_init_seqid_counter(struct nfs_seqid_counter *sc)
{
	sc->create_time = ktime_get();
	sc->flags = 0;
	sc->counter = 0;
	spin_lock_init(&sc->lock);
	INIT_LIST_HEAD(&sc->list);
	rpc_init_wait_queue(&sc->wait, "Seqid_waitqueue");
}

static void
nfs4_destroy_seqid_counter(struct nfs_seqid_counter *sc)
{
	rpc_destroy_wait_queue(&sc->wait);
}

/*
 * nfs4_alloc_state_owner(): this is called on the OPEN or CREATE path to
 * create a new state_owner.
 *
 */
static struct nfs4_state_owner *
nfs4_alloc_state_owner(struct nfs_server *server,
		       const struct cred *cred,
		       gfp_t gfp_flags)
{
	struct nfs4_state_owner *sp;

	sp = kzalloc(sizeof(*sp), gfp_flags);
	if (!sp)
		return NULL;
	sp->so_seqid.owner_id = ida_simple_get(&server->openowner_id, 0, 0,
						gfp_flags);
	if (sp->so_seqid.owner_id < 0) {
		kfree(sp);
		return NULL;
	}
	sp->so_server = server;
	sp->so_cred = get_cred(cred);
	spin_lock_init(&sp->so_lock);
	INIT_LIST_HEAD(&sp->so_states);
	nfs4_init_seqid_counter(&sp->so_seqid);
	atomic_set(&sp->so_count, 1);
	INIT_LIST_HEAD(&sp->so_lru);
	seqcount_spinlock_init(&sp->so_reclaim_seqcount, &sp->so_lock);
	mutex_init(&sp->so_delegreturn_mutex);
	return sp;
}

static void
nfs4_reset_state_owner(struct nfs4_state_owner *sp)
{
	/* This state_owner is no longer usable, but must
	 * remain in place so that state recovery can find it
	 * and the opens associated with it.
	 * It may also be used for a new 'open' request to
	 * return a delegation to the server.
	 * So update the 'create_time' so that it looks like
	 * a new state_owner. This will cause the server to
	 * request an OPEN_CONFIRM to start a new sequence.
	 */
	sp->so_seqid.create_time = ktime_get();
}

static void nfs4_free_state_owner(struct nfs4_state_owner *sp)
{
	nfs4_destroy_seqid_counter(&sp->so_seqid);
	put_cred(sp->so_cred);
	ida_simple_remove(&sp->so_server->openowner_id, sp->so_seqid.owner_id);
	kfree(sp);
}

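/*
 * Reap state owners that have been sitting unused on the LRU for more
 * than one lease period: unhook them from the tree under the client
 * lock, then free them after the lock is dropped.
 */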
static void nfs4_gc_state_owners(struct nfs_server *server)
{
	struct nfs_client *clp = server->nfs_client;
	struct nfs4_state_owner *sp, *tmp;
	unsigned long time_min, time_max;
	LIST_HEAD(doomed);

	spin_lock(&clp->cl_lock);
	time_max = jiffies;
	time_min = (long)time_max - (long)clp->cl_lease_time;
	list_for_each_entry_safe(sp, tmp, &server->state_owners_lru, so_lru) {
		/* NB: LRU is sorted so that oldest is at the head */
		if (time_in_range(sp->so_expires, time_min, time_max))
			break;
		list_move(&sp->so_lru, &doomed);
		nfs4_remove_state_owner_locked(sp);
	}
	spin_unlock(&clp->cl_lock);

	list_for_each_entry_safe(sp, tmp, &doomed, so_lru) {
		list_del(&sp->so_lru);
		nfs4_free_state_owner(sp);
	}
}

/**
 * nfs4_get_state_owner - Look up a state owner given a credential
 * @server: nfs_server to search
 * @cred: RPC credential to match
 * @gfp_flags: allocation mode
 *
 * Returns a pointer to an instantiated nfs4_state_owner struct, or NULL.
 */
struct nfs4_state_owner *nfs4_get_state_owner(struct nfs_server *server,
					      const struct cred *cred,
					      gfp_t gfp_flags)
{
	struct nfs_client *clp = server->nfs_client;
	struct nfs4_state_owner *sp, *new;

	spin_lock(&clp->cl_lock);
	sp = nfs4_find_state_owner_locked(server, cred);
	spin_unlock(&clp->cl_lock);
	if (sp != NULL)
		goto out;
	new = nfs4_alloc_state_owner(server, cred, gfp_flags);
	if (new == NULL)
		goto out;
	spin_lock(&clp->cl_lock);
	sp = nfs4_insert_state_owner_locked(new);
	spin_unlock(&clp->cl_lock);
	if (sp != new)
		nfs4_free_state_owner(new);
out:
	nfs4_gc_state_owners(server);
	return sp;
}

/**
 * nfs4_put_state_owner - Release a nfs4_state_owner
 * @sp: state owner data to release
 *
 * Note that we keep released state owners on an LRU
 * list.
 * This caches valid state owners so that they can be
 * reused, to avoid the OPEN_CONFIRM on minor version 0.
 * It also pins the uniquifier of dropped state owners for
 * a while, to ensure that those state owner names are
 * never reused.
 */
void nfs4_put_state_owner(struct nfs4_state_owner *sp)
{
	struct nfs_server *server = sp->so_server;
	struct nfs_client *clp = server->nfs_client;

	if (!atomic_dec_and_lock(&sp->so_count, &clp->cl_lock))
		return;

	sp->so_expires = jiffies;
	list_add_tail(&sp->so_lru, &server->state_owners_lru);
	spin_unlock(&clp->cl_lock);
}

/**
 * nfs4_purge_state_owners - Release all cached state owners
 * @server: nfs_server with cached state owners to release
 * @head: resulting list of state owners
 *
 * Called at umount time. Remaining state owners will be on
 * the LRU with ref count of zero.
 * Note that the state owners are not freed, but are added
 * to the list @head, which can later be used as an argument
 * to nfs4_free_state_owners.
 */
void nfs4_purge_state_owners(struct nfs_server *server, struct list_head *head)
{
	struct nfs_client *clp = server->nfs_client;
	struct nfs4_state_owner *sp, *tmp;

	spin_lock(&clp->cl_lock);
	list_for_each_entry_safe(sp, tmp, &server->state_owners_lru, so_lru) {
		list_move(&sp->so_lru, head);
		nfs4_remove_state_owner_locked(sp);
	}
	spin_unlock(&clp->cl_lock);
}

/**
 * nfs4_free_state_owners - Release all cached state owners
 * @head: resulting list of state owners
 *
 * Frees a list of state owners that was generated by
 * nfs4_purge_state_owners
 */
void nfs4_free_state_owners(struct list_head *head)
{
	struct nfs4_state_owner *sp, *tmp;

	list_for_each_entry_safe(sp, tmp, head, so_lru) {
		list_del(&sp->so_lru);
		nfs4_free_state_owner(sp);
	}
}

static struct nfs4_state *
nfs4_alloc_open_state(void)
{
	struct nfs4_state *state;

	state = kzalloc(sizeof(*state), GFP_NOFS);
	if (!state)
		return NULL;
	refcount_set(&state->count, 1);
	INIT_LIST_HEAD(&state->lock_states);
	spin_lock_init(&state->state_lock);
	seqlock_init(&state->seqlock);
	init_waitqueue_head(&state->waitq);
	return state;
}

void
nfs4_state_set_mode_locked(struct nfs4_state *state, fmode_t fmode)
{
	if (state->state == fmode)
		return;
	/* NB! List reordering - see the reclaim code for why. */
	if ((fmode & FMODE_WRITE) != (state->state & FMODE_WRITE)) {
		if (fmode & FMODE_WRITE)
			list_move(&state->open_states, &state->owner->so_states);
		else
			list_move_tail(&state->open_states, &state->owner->so_states);
	}
	state->state = fmode;
}

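/*
 * Find the open state for @owner on @inode, skipping stateids that
 * have been invalidated.  A reference is taken on the state returned.
 * Callers must hold the RCU read lock or inode->i_lock.
 */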
static struct nfs4_state *
__nfs4_find_state_byowner(struct inode *inode, struct nfs4_state_owner *owner)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs4_state *state;

	list_for_each_entry_rcu(state, &nfsi->open_states, inode_states) {
		if (state->owner != owner)
			continue;
		if (!nfs4_valid_open_stateid(state))
			continue;
		if (refcount_inc_not_zero(&state->count))
			return state;
	}
	return NULL;
}

static void
nfs4_free_open_state(struct nfs4_state *state)
{
	kfree_rcu(state, rcu_head);
}

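/*
 * Find the open state for @owner on @inode, or create one if none
 * exists.  The lockless lookup is repeated under the owner and inode
 * locks before a newly allocated state is installed, so concurrent
 * opens cannot create duplicates.
 */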
struct nfs4_state *
nfs4_get_open_state(struct inode *inode, struct nfs4_state_owner *owner)
{
	struct nfs4_state *state, *new;
	struct nfs_inode *nfsi = NFS_I(inode);

	rcu_read_lock();
	state = __nfs4_find_state_byowner(inode, owner);
	rcu_read_unlock();
	if (state)
		goto out;
	new = nfs4_alloc_open_state();
	spin_lock(&owner->so_lock);
	spin_lock(&inode->i_lock);
	state = __nfs4_find_state_byowner(inode, owner);
	if (state == NULL && new != NULL) {
		state = new;
		state->owner = owner;
		atomic_inc(&owner->so_count);
		ihold(inode);
		state->inode = inode;
		list_add_rcu(&state->inode_states, &nfsi->open_states);
		spin_unlock(&inode->i_lock);
		/* Note: The reclaim code dictates that we add stateless
		 * and read-only stateids to the end of the list */
		list_add_tail(&state->open_states, &owner->so_states);
		spin_unlock(&owner->so_lock);
	} else {
		spin_unlock(&inode->i_lock);
		spin_unlock(&owner->so_lock);
		if (new)
			nfs4_free_open_state(new);
	}
out:
	return state;
}

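/*
 * Release a reference to an open state.  On the final put, the state
 * is unhooked from the inode and the state owner, any delegation is
 * returned if appropriate, and the state is freed.
 */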
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) void nfs4_put_open_state(struct nfs4_state *state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) struct inode *inode = state->inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) struct nfs4_state_owner *owner = state->owner;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) if (!refcount_dec_and_lock(&state->count, &owner->so_lock))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) spin_lock(&inode->i_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) list_del_rcu(&state->inode_states);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) list_del(&state->open_states);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) spin_unlock(&inode->i_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) spin_unlock(&owner->so_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) nfs4_inode_return_delegation_on_close(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) iput(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) nfs4_free_open_state(state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) nfs4_put_state_owner(owner);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) * Close the current file.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) static void __nfs4_close(struct nfs4_state *state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) fmode_t fmode, gfp_t gfp_mask, int wait)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) struct nfs4_state_owner *owner = state->owner;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) int call_close = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) fmode_t newstate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) atomic_inc(&owner->so_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) /* Protect against nfs4_find_state() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) spin_lock(&owner->so_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) switch (fmode & (FMODE_READ | FMODE_WRITE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) case FMODE_READ:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) state->n_rdonly--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) case FMODE_WRITE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) state->n_wronly--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) case FMODE_READ|FMODE_WRITE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) state->n_rdwr--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) newstate = FMODE_READ|FMODE_WRITE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) if (state->n_rdwr == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) if (state->n_rdonly == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) newstate &= ~FMODE_READ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) call_close |= test_bit(NFS_O_RDONLY_STATE, &state->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) call_close |= test_bit(NFS_O_RDWR_STATE, &state->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) if (state->n_wronly == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) newstate &= ~FMODE_WRITE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) call_close |= test_bit(NFS_O_WRONLY_STATE, &state->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) call_close |= test_bit(NFS_O_RDWR_STATE, &state->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) if (newstate == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) clear_bit(NFS_DELEGATED_STATE, &state->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) nfs4_state_set_mode_locked(state, newstate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) spin_unlock(&owner->so_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) if (!call_close) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) nfs4_put_open_state(state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) nfs4_put_state_owner(owner);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) nfs4_do_close(state, gfp_mask, wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) void nfs4_close_state(struct nfs4_state *state, fmode_t fmode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) __nfs4_close(state, fmode, GFP_NOFS, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) void nfs4_close_sync(struct nfs4_state *state, fmode_t fmode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) __nfs4_close(state, fmode, GFP_KERNEL, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) * Search the state->lock_states for an existing lock_owner
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) * that is compatible with either of the given owners.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) * If the second is non-zero, then the first refers to a Posix-lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) * owner (current->files) and the second refers to a flock/OFD
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) * owner (struct file*). In that case, prefer a match for the first
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) * owner.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) * If both sorts of locks are held on the one file we cannot know
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) * which stateid was intended to be used, so a "correct" choice cannot
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) * be made. Failing that, a "consistent" choice is preferable. The
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) * consistent choice we make is to prefer the first owner, that of a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) * Posix lock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) */
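/*
 * Note: the search below terminates on an fl_owner (POSIX) match, but only
 * records an fl_owner2 (flock/OFD) match and keeps scanning - this is what
 * gives the POSIX owner priority when both kinds of lock are present.
 */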
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) static struct nfs4_lock_state *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) __nfs4_find_lock_state(struct nfs4_state *state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) fl_owner_t fl_owner, fl_owner_t fl_owner2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) struct nfs4_lock_state *pos, *ret = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) list_for_each_entry(pos, &state->lock_states, ls_locks) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) if (pos->ls_owner == fl_owner) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) ret = pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) if (pos->ls_owner == fl_owner2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) ret = pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) refcount_inc(&ret->ls_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) * Allocate and initialize a new lock_state for the given open state and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) * lock owner, reserving a lock-owner id from the server's lockowner_id ida.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) * Returns NULL on failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) static struct nfs4_lock_state *nfs4_alloc_lock_state(struct nfs4_state *state, fl_owner_t fl_owner)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) struct nfs4_lock_state *lsp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) struct nfs_server *server = state->owner->so_server;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) lsp = kzalloc(sizeof(*lsp), GFP_NOFS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) if (lsp == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) nfs4_init_seqid_counter(&lsp->ls_seqid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) refcount_set(&lsp->ls_count, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) lsp->ls_state = state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) lsp->ls_owner = fl_owner;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) lsp->ls_seqid.owner_id = ida_simple_get(&server->lockowner_id, 0, 0, GFP_NOFS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) if (lsp->ls_seqid.owner_id < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) goto out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) INIT_LIST_HEAD(&lsp->ls_locks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) return lsp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) out_free:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) kfree(lsp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) void nfs4_free_lock_state(struct nfs_server *server, struct nfs4_lock_state *lsp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) ida_simple_remove(&server->lockowner_id, lsp->ls_seqid.owner_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) nfs4_destroy_seqid_counter(&lsp->ls_seqid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) kfree(lsp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) * Return a referenced lock_state for the given lock owner. If none exists
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) * yet, allocate a new (as yet server-uninitialized) one and add it to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) * state->lock_states. Returns NULL on allocation failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) */
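/*
 * Note: state_lock cannot be held across the allocation, so the lookup is
 * retried after nfs4_alloc_lock_state() returns. If another task raced in
 * and added a matching lock_state in the meantime, the fresh allocation is
 * simply freed again.
 */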
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) static struct nfs4_lock_state *nfs4_get_lock_state(struct nfs4_state *state, fl_owner_t owner)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) struct nfs4_lock_state *lsp, *new = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) for(;;) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) spin_lock(&state->state_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) lsp = __nfs4_find_lock_state(state, owner, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) if (lsp != NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) if (new != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) list_add(&new->ls_locks, &state->lock_states);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) set_bit(LK_STATE_IN_USE, &state->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) lsp = new;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) new = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) spin_unlock(&state->state_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) new = nfs4_alloc_lock_state(state, owner);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) if (new == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) spin_unlock(&state->state_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) if (new != NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) nfs4_free_lock_state(state->owner->so_server, new);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) return lsp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) * Release reference to lock_state, and free it if we see that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) * it is no longer in use
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) */
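/*
 * Note: refcount_dec_and_lock() ensures the lock_state is unhashed from
 * state->lock_states under state_lock only when the final reference is
 * dropped. If the lock owner was ever established on the server
 * (NFS_LOCK_INITIALIZED), release goes through the minor-version
 * free_lock_state operation so the server-side state can be torn down
 * too; otherwise the structure is simply freed locally.
 */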
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) void nfs4_put_lock_state(struct nfs4_lock_state *lsp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) struct nfs_server *server;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) struct nfs4_state *state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) if (lsp == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) state = lsp->ls_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) if (!refcount_dec_and_lock(&lsp->ls_count, &state->state_lock))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) list_del(&lsp->ls_locks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) if (list_empty(&state->lock_states))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) clear_bit(LK_STATE_IN_USE, &state->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) spin_unlock(&state->state_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) server = state->owner->so_server;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) struct nfs_client *clp = server->nfs_client;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) clp->cl_mvops->free_lock_state(server, lsp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) nfs4_free_lock_state(server, lsp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) static void nfs4_fl_copy_lock(struct file_lock *dst, struct file_lock *src)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) struct nfs4_lock_state *lsp = src->fl_u.nfs4_fl.owner;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) dst->fl_u.nfs4_fl.owner = lsp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) refcount_inc(&lsp->ls_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) static void nfs4_fl_release_lock(struct file_lock *fl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) nfs4_put_lock_state(fl->fl_u.nfs4_fl.owner);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) static const struct file_lock_operations nfs4_fl_lock_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) .fl_copy_lock = nfs4_fl_copy_lock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) .fl_release_private = nfs4_fl_release_lock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) int nfs4_set_lock_state(struct nfs4_state *state, struct file_lock *fl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) struct nfs4_lock_state *lsp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) if (fl->fl_ops != NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) lsp = nfs4_get_lock_state(state, fl->fl_owner);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) if (lsp == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) fl->fl_u.nfs4_fl.owner = lsp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) fl->fl_ops = &nfs4_fl_lock_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) static int nfs4_copy_lock_stateid(nfs4_stateid *dst,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) struct nfs4_state *state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) const struct nfs_lock_context *l_ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) struct nfs4_lock_state *lsp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) fl_owner_t fl_owner, fl_flock_owner;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) int ret = -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) if (l_ctx == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) if (test_bit(LK_STATE_IN_USE, &state->flags) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) fl_owner = l_ctx->lockowner;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) fl_flock_owner = l_ctx->open_context->flock_owner;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) spin_lock(&state->state_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) lsp = __nfs4_find_lock_state(state, fl_owner, fl_flock_owner);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) if (lsp && test_bit(NFS_LOCK_LOST, &lsp->ls_flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) ret = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) else if (lsp != NULL && test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) nfs4_stateid_copy(dst, &lsp->ls_stateid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) spin_unlock(&state->state_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) nfs4_put_lock_state(lsp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017)
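/*
 * Copy the open stateid (or the zero stateid if none is held) into @dst.
 * The read_seqbegin()/read_seqretry() loop retries the copy whenever a
 * writer updates state->open_stateid concurrently, so @dst never ends up
 * holding a torn, half-updated stateid. Returns true if a valid open
 * stateid was copied.
 */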
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) bool nfs4_copy_open_stateid(nfs4_stateid *dst, struct nfs4_state *state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) bool ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) const nfs4_stateid *src;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) int seq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) ret = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) src = &zero_stateid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) seq = read_seqbegin(&state->seqlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) if (test_bit(NFS_OPEN_STATE, &state->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) src = &state->open_stateid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) ret = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) nfs4_stateid_copy(dst, src);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) } while (read_seqretry(&state->seqlock, seq));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) * Byte-range lock aware utility to initialize the stateid of read/write
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) * requests.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) */
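/*
 * Preference order: a delegation stateid wins if one covers the request,
 * otherwise a byte-range lock stateid matching the lock context is used,
 * and only then the open stateid. The lock lookup is still performed
 * first so that a lock marked NFS_LOCK_LOST can short-circuit with -EIO
 * before any delegation is considered. On servers with
 * NFS_CAP_STATEID_NFSV41, the seqid field of the result is zeroed.
 */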
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) int nfs4_select_rw_stateid(struct nfs4_state *state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) fmode_t fmode, const struct nfs_lock_context *l_ctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) nfs4_stateid *dst, const struct cred **cred)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) if (!nfs4_valid_open_stateid(state))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) if (cred != NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) *cred = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) ret = nfs4_copy_lock_stateid(dst, state, l_ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) if (ret == -EIO)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) /* A lost lock - don't even consider delegations */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) /* returns true if delegation stateid found and copied */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) if (nfs4_copy_delegation_stateid(state->inode, fmode, dst, cred)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) if (ret != -ENOENT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) /* nfs4_copy_delegation_stateid() didn't over-write
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) * dst, so it still has the lock stateid which we now
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) * choose to use.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) ret = nfs4_copy_open_stateid(dst, state) ? 0 : -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) if (nfs_server_capable(state->inode, NFS_CAP_STATEID_NFSV41))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) dst->seqid = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) struct nfs_seqid *nfs_alloc_seqid(struct nfs_seqid_counter *counter, gfp_t gfp_mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) struct nfs_seqid *new;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) new = kmalloc(sizeof(*new), gfp_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) if (new == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) new->sequence = counter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) INIT_LIST_HEAD(&new->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) new->task = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) return new;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) void nfs_release_seqid(struct nfs_seqid *seqid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) struct nfs_seqid_counter *sequence;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) if (seqid == NULL || list_empty(&seqid->list))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) sequence = seqid->sequence;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) spin_lock(&sequence->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) list_del_init(&seqid->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) if (!list_empty(&sequence->list)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) struct nfs_seqid *next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) next = list_first_entry(&sequence->list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) struct nfs_seqid, list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) rpc_wake_up_queued_task(&sequence->wait, next->task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) spin_unlock(&sequence->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) void nfs_free_seqid(struct nfs_seqid *seqid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) nfs_release_seqid(seqid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) kfree(seqid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) * Increment the seqid if the OPEN/OPEN_DOWNGRADE/CLOSE succeeded, or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) * failed with a seqid-incrementing error -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) * see the comments for seqid_mutating_error() in nfs4.h
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) static void nfs_increment_seqid(int status, struct nfs_seqid *seqid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) switch (status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) case -NFS4ERR_BAD_SEQID:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) if (seqid->sequence->flags & NFS_SEQID_CONFIRMED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) pr_warn_ratelimited("NFS: v4 server returned a bad"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) " sequence-id error on an"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) " unconfirmed sequence %p!\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) seqid->sequence);
		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) case -NFS4ERR_STALE_CLIENTID:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) case -NFS4ERR_STALE_STATEID:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) case -NFS4ERR_BAD_STATEID:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) case -NFS4ERR_BADXDR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) case -NFS4ERR_RESOURCE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) case -NFS4ERR_NOFILEHANDLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) case -NFS4ERR_MOVED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) /* Non-seqid mutating errors */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) * Note: no locking needed as we are guaranteed to be first
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) * on the sequence list
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) seqid->sequence->counter++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) void nfs_increment_open_seqid(int status, struct nfs_seqid *seqid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) struct nfs4_state_owner *sp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) if (seqid == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) sp = container_of(seqid->sequence, struct nfs4_state_owner, so_seqid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) if (status == -NFS4ERR_BAD_SEQID)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) nfs4_reset_state_owner(sp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) if (!nfs4_has_session(sp->so_server->nfs_client))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) nfs_increment_seqid(status, seqid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) * Increment the seqid if the LOCK/LOCKU succeeded, or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) * failed with a seqid-incrementing error -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) * see the comments for seqid_mutating_error() in nfs4.h
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) void nfs_increment_lock_seqid(int status, struct nfs_seqid *seqid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) if (seqid != NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) nfs_increment_seqid(status, seqid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169)
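/*
 * Serialize seqid-mutating operations for a given state/lock owner: only
 * the nfs_seqid at the head of sequence->list is allowed to proceed; all
 * others sleep on the rpc wait queue and the caller gets -EAGAIN to retry.
 * nfs_release_seqid() above wakes the next waiter in FIFO order.
 */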
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) int nfs_wait_on_sequence(struct nfs_seqid *seqid, struct rpc_task *task)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) struct nfs_seqid_counter *sequence;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) int status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) if (seqid == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) sequence = seqid->sequence;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) spin_lock(&sequence->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) seqid->task = task;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) if (list_empty(&seqid->list))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) list_add_tail(&seqid->list, &sequence->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) if (list_first_entry(&sequence->list, struct nfs_seqid, list) == seqid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) rpc_sleep_on(&sequence->wait, task, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) status = -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) spin_unlock(&sequence->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) static int nfs4_run_state_manager(void *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) static void nfs4_clear_state_manager_bit(struct nfs_client *clp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) smp_mb__before_atomic();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) clear_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) smp_mb__after_atomic();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) wake_up_bit(&clp->cl_state, NFS4CLNT_MANAGER_RUNNING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) rpc_wake_up(&clp->cl_rpcwaitq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) * Schedule the nfs_client asynchronous state management routine
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) */
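/*
 * Note: NFS4CLNT_RUN_MANAGER is set before NFS4CLNT_MANAGER_RUNNING is
 * tested, so an already-running manager thread will notice the new work on
 * its next pass; otherwise exactly one new kthread is started. The module
 * and client references taken here are balanced when the manager thread
 * exits (or immediately below if kthread_run() fails).
 */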
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) void nfs4_schedule_state_manager(struct nfs_client *clp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) struct task_struct *task;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) char buf[INET6_ADDRSTRLEN + sizeof("-manager") + 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) set_bit(NFS4CLNT_RUN_MANAGER, &clp->cl_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) if (test_and_set_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) __module_get(THIS_MODULE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) refcount_inc(&clp->cl_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) /* The rcu_read_lock() is not strictly necessary, as the state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) * manager is the only thread that ever changes the rpc_xprt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) * after it's initialized. At this point, we're single threaded. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) snprintf(buf, sizeof(buf), "%s-manager",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_ADDR));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) task = kthread_run(nfs4_run_state_manager, clp, "%s", buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) if (IS_ERR(task)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) printk(KERN_ERR "%s: kthread_run: %ld\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) __func__, PTR_ERR(task));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) nfs4_clear_state_manager_bit(clp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) nfs_put_client(clp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) module_put(THIS_MODULE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) * Schedule a lease recovery attempt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) void nfs4_schedule_lease_recovery(struct nfs_client *clp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) if (!clp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) if (!test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) set_bit(NFS4CLNT_CHECK_LEASE, &clp->cl_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) dprintk("%s: scheduling lease recovery for server %s\n", __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) clp->cl_hostname);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) nfs4_schedule_state_manager(clp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) EXPORT_SYMBOL_GPL(nfs4_schedule_lease_recovery);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) * nfs4_schedule_migration_recovery - trigger migration recovery
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) * @server: FSID that is migrating
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) * Returns zero if recovery has started, otherwise a negative NFS4ERR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) * value is returned.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) int nfs4_schedule_migration_recovery(const struct nfs_server *server)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) struct nfs_client *clp = server->nfs_client;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) if (server->fh_expire_type != NFS4_FH_PERSISTENT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) pr_err("NFS: volatile file handles not supported (server %s)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) clp->cl_hostname);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) return -NFS4ERR_IO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) if (test_bit(NFS_MIG_FAILED, &server->mig_status))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) return -NFS4ERR_IO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) dprintk("%s: scheduling migration recovery for (%llx:%llx) on %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) (unsigned long long)server->fsid.major,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) (unsigned long long)server->fsid.minor,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) clp->cl_hostname);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) set_bit(NFS_MIG_IN_TRANSITION,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) &((struct nfs_server *)server)->mig_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) set_bit(NFS4CLNT_MOVED, &clp->cl_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) nfs4_schedule_state_manager(clp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) EXPORT_SYMBOL_GPL(nfs4_schedule_migration_recovery);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) * nfs4_schedule_lease_moved_recovery - start lease-moved recovery
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) * @clp: server to check for moved leases
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) void nfs4_schedule_lease_moved_recovery(struct nfs_client *clp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) dprintk("%s: scheduling lease-moved recovery for client ID %llx on %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) __func__, clp->cl_clientid, clp->cl_hostname);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) set_bit(NFS4CLNT_LEASE_MOVED, &clp->cl_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) nfs4_schedule_state_manager(clp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) EXPORT_SYMBOL_GPL(nfs4_schedule_lease_moved_recovery);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) int nfs4_wait_clnt_recover(struct nfs_client *clp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) int res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) might_sleep();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) refcount_inc(&clp->cl_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) res = wait_on_bit_action(&clp->cl_state, NFS4CLNT_MANAGER_RUNNING,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) nfs_wait_bit_killable, TASK_KILLABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) if (res)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) if (clp->cl_cons_state < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) res = clp->cl_cons_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) nfs_put_client(clp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) return res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318)
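/*
 * Wait for an expired lease to be re-established, kicking the state
 * manager as needed. Gives up with -EIO after NFS4_MAX_LOOP_ON_RECOVER
 * attempts if the LEASE_EXPIRED/CHECK_LEASE flags never clear.
 */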
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) int nfs4_client_recover_expired_lease(struct nfs_client *clp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) unsigned int loop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) for (loop = NFS4_MAX_LOOP_ON_RECOVER; loop != 0; loop--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) ret = nfs4_wait_clnt_recover(clp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) if (ret != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) if (!test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) !test_bit(NFS4CLNT_CHECK_LEASE, &clp->cl_state))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) nfs4_schedule_state_manager(clp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) ret = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) * nfs40_handle_cb_pathdown - return all delegations after NFS4ERR_CB_PATH_DOWN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) * @clp: client to process
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) * Set the NFS4CLNT_LEASE_EXPIRED state in order to force a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) * resend of the SETCLIENTID and hence re-establish the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) * callback channel. Then return all existing delegations.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) static void nfs40_handle_cb_pathdown(struct nfs_client *clp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) nfs_expire_all_delegations(clp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) dprintk("%s: handling CB_PATHDOWN recovery for server %s\n", __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) clp->cl_hostname);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) void nfs4_schedule_path_down_recovery(struct nfs_client *clp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) nfs40_handle_cb_pathdown(clp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) nfs4_schedule_state_manager(clp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) static int nfs4_state_mark_reclaim_reboot(struct nfs_client *clp, struct nfs4_state *state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) if (!nfs4_valid_open_stateid(state))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) set_bit(NFS_STATE_RECLAIM_REBOOT, &state->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) /* Don't recover state that expired before the reboot */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) if (test_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) clear_bit(NFS_STATE_RECLAIM_REBOOT, &state->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) set_bit(NFS_OWNER_RECLAIM_REBOOT, &state->owner->so_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) set_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) int nfs4_state_mark_reclaim_nograce(struct nfs_client *clp, struct nfs4_state *state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) if (!nfs4_valid_open_stateid(state))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) set_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) clear_bit(NFS_STATE_RECLAIM_REBOOT, &state->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) set_bit(NFS_OWNER_RECLAIM_NOGRACE, &state->owner->so_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) set_bit(NFS4CLNT_RECLAIM_NOGRACE, &clp->cl_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) int nfs4_schedule_stateid_recovery(const struct nfs_server *server, struct nfs4_state *state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) struct nfs_client *clp = server->nfs_client;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) if (!nfs4_state_mark_reclaim_nograce(clp, state))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) return -EBADF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) nfs_inode_find_delegation_state_and_recover(state->inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) &state->stateid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) dprintk("%s: scheduling stateid recovery for server %s\n", __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) clp->cl_hostname);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) nfs4_schedule_state_manager(clp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) EXPORT_SYMBOL_GPL(nfs4_schedule_stateid_recovery);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) static struct nfs4_lock_state *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) nfs_state_find_lock_state_by_stateid(struct nfs4_state *state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) const nfs4_stateid *stateid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) struct nfs4_lock_state *pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) list_for_each_entry(pos, &state->lock_states, ls_locks) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) if (!test_bit(NFS_LOCK_INITIALIZED, &pos->ls_flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) if (nfs4_stateid_match_or_older(&pos->ls_stateid, stateid))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) return pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) static bool nfs_state_lock_state_matches_stateid(struct nfs4_state *state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) const nfs4_stateid *stateid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) bool found = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) if (test_bit(LK_STATE_IN_USE, &state->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) spin_lock(&state->state_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) if (nfs_state_find_lock_state_by_stateid(state, stateid))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) found = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) spin_unlock(&state->state_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) return found;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429)
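/*
 * Scan every open context on the inode for open or lock state matching
 * @stateid (or an older incarnation of it) and mark any hit for "no grace"
 * reclaim; delegation state is checked separately. The state manager is
 * kicked if any open or lock state needed recovery.
 */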
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) void nfs_inode_find_state_and_recover(struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) const nfs4_stateid *stateid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) struct nfs_inode *nfsi = NFS_I(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) struct nfs_open_context *ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) struct nfs4_state *state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) bool found = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) list_for_each_entry_rcu(ctx, &nfsi->open_files, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) state = ctx->state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) if (state == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) if (nfs4_stateid_match_or_older(&state->stateid, stateid) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) nfs4_state_mark_reclaim_nograce(clp, state)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) found = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) if (test_bit(NFS_OPEN_STATE, &state->flags) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) nfs4_stateid_match_or_older(&state->open_stateid, stateid) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) nfs4_state_mark_reclaim_nograce(clp, state)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) found = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) if (nfs_state_lock_state_matches_stateid(state, stateid) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) nfs4_state_mark_reclaim_nograce(clp, state))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) found = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) nfs_inode_find_delegation_state_and_recover(inode, stateid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) if (found)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) nfs4_schedule_state_manager(clp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) static void nfs4_state_mark_open_context_bad(struct nfs4_state *state, int err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) struct inode *inode = state->inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) struct nfs_inode *nfsi = NFS_I(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) struct nfs_open_context *ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) list_for_each_entry_rcu(ctx, &nfsi->open_files, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) if (ctx->state != state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) set_bit(NFS_CONTEXT_BAD, &ctx->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) pr_warn("NFSv4: state recovery failed for open file %pd2, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) "error = %d\n", ctx->dentry, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) static void nfs4_state_mark_recovery_failed(struct nfs4_state *state, int error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) set_bit(NFS_STATE_RECOVERY_FAILED, &state->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) nfs4_state_mark_open_context_bad(state, error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489)
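/*
 * Reclaim the byte-range locks held on this open state: the POSIX lock
 * list is walked first, then the flock list, dropping flc_lock around each
 * ->recover_lock() call. Errors that make the whole reclaim attempt
 * hopeless (stale/expired state, session trouble) abort immediately, while
 * per-lock failures mark that lock as lost (NFS_LOCK_LOST) and let the
 * walk continue.
 */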
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) static int nfs4_reclaim_locks(struct nfs4_state *state, const struct nfs4_state_recovery_ops *ops)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) struct inode *inode = state->inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) struct nfs_inode *nfsi = NFS_I(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) struct file_lock *fl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) struct nfs4_lock_state *lsp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) int status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) struct file_lock_context *flctx = inode->i_flctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) struct list_head *list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) if (flctx == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) list = &flctx->flc_posix;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) /* Guard against delegation returns and new lock/unlock calls */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) down_write(&nfsi->rwsem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) spin_lock(&flctx->flc_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) restart:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) list_for_each_entry(fl, list, fl_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) if (nfs_file_open_context(fl->fl_file)->state != state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) spin_unlock(&flctx->flc_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) status = ops->recover_lock(state, fl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) switch (status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) case -ETIMEDOUT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) case -ESTALE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) case -NFS4ERR_ADMIN_REVOKED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) case -NFS4ERR_STALE_STATEID:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) case -NFS4ERR_BAD_STATEID:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) case -NFS4ERR_EXPIRED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) case -NFS4ERR_NO_GRACE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) case -NFS4ERR_STALE_CLIENTID:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) case -NFS4ERR_BADSESSION:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) case -NFS4ERR_BADSLOT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) case -NFS4ERR_BAD_HIGH_SLOT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) pr_err("NFS: %s: unhandled error %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) __func__, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) case -ENOMEM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) case -NFS4ERR_DENIED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) case -NFS4ERR_RECLAIM_BAD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) case -NFS4ERR_RECLAIM_CONFLICT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) lsp = fl->fl_u.nfs4_fl.owner;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) if (lsp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) set_bit(NFS_LOCK_LOST, &lsp->ls_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) spin_lock(&flctx->flc_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) if (list == &flctx->flc_posix) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) list = &flctx->flc_flock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) goto restart;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) spin_unlock(&flctx->flc_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) up_write(&nfsi->rwsem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554)
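/*
 * Note: nfs42_complete_copies() signals completion for any in-flight
 * server-side copies whose source or destination open state matches the
 * state being reclaimed, so tasks waiting on those copies are not left
 * blocked across state recovery.
 */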
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) #ifdef CONFIG_NFS_V4_2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) static void nfs42_complete_copies(struct nfs4_state_owner *sp, struct nfs4_state *state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) struct nfs4_copy_state *copy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) if (!test_bit(NFS_CLNT_DST_SSC_COPY_STATE, &state->flags) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) !test_bit(NFS_CLNT_SRC_SSC_COPY_STATE, &state->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) spin_lock(&sp->so_server->nfs_client->cl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) list_for_each_entry(copy, &sp->so_server->ss_copies, copies) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) if ((test_bit(NFS_CLNT_DST_SSC_COPY_STATE, &state->flags) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) !nfs4_stateid_match_other(&state->stateid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) &copy->parent_dst_state->stateid)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) copy->flags = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) if (test_and_clear_bit(NFS_CLNT_DST_SSC_COPY_STATE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) &state->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) clear_bit(NFS_CLNT_SRC_SSC_COPY_STATE, &state->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) complete(&copy->completion);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) list_for_each_entry(copy, &sp->so_server->ss_copies, src_copies) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) if ((test_bit(NFS_CLNT_SRC_SSC_COPY_STATE, &state->flags) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) !nfs4_stateid_match_other(&state->stateid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) &copy->parent_src_state->stateid)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) copy->flags = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) if (test_and_clear_bit(NFS_CLNT_DST_SSC_COPY_STATE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) &state->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) complete(&copy->completion);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) spin_unlock(&sp->so_server->nfs_client->cl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) #else /* !CONFIG_NFS_V4_2 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) static inline void nfs42_complete_copies(struct nfs4_state_owner *sp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) struct nfs4_state *state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) #endif /* CONFIG_NFS_V4_2 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595)
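/*
 * Recover a single open stateid, then reclaim the byte-range locks held
 * under it. Locks that could not be re-established trigger a rate-limited
 * warning.
 */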
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) static int __nfs4_reclaim_open_state(struct nfs4_state_owner *sp, struct nfs4_state *state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) const struct nfs4_state_recovery_ops *ops)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) struct nfs4_lock_state *lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) int status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) status = ops->recover_open(sp, state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) if (status < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) status = nfs4_reclaim_locks(state, ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) if (status < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) if (!test_bit(NFS_DELEGATED_STATE, &state->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) spin_lock(&state->state_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) list_for_each_entry(lock, &state->lock_states, ls_locks) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) trace_nfs4_state_lock_reclaim(state, lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) if (!test_bit(NFS_LOCK_INITIALIZED, &lock->ls_flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) pr_warn_ratelimited("NFS: %s: Lock reclaim failed!\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) spin_unlock(&state->state_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) nfs42_complete_copies(sp, state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) clear_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) static int nfs4_reclaim_open_state(struct nfs4_state_owner *sp, const struct nfs4_state_recovery_ops *ops)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) struct nfs4_state *state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) unsigned int loop = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) int status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) #ifdef CONFIG_NFS_V4_2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) bool found_ssc_copy_state = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) #endif /* CONFIG_NFS_V4_2 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) /* Note: we rely on the sp->so_states list being ordered
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) * so that we always reclaim open(O_RDWR) and/or open(O_WRITE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) * states first.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) * This is needed to ensure that the server won't give us any
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) * read delegations that we have to return if, say, we are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) * recovering after a network partition or a reboot from a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) * server that doesn't support a grace period.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) spin_lock(&sp->so_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) raw_write_seqcount_begin(&sp->so_reclaim_seqcount);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) restart:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) list_for_each_entry(state, &sp->so_states, open_states) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) if (!test_and_clear_bit(ops->state_flag_bit, &state->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) if (!nfs4_valid_open_stateid(state))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) if (state->state == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) #ifdef CONFIG_NFS_V4_2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) if (test_bit(NFS_SRV_SSC_COPY_STATE, &state->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) nfs4_state_mark_recovery_failed(state, -EIO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) found_ssc_copy_state = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) #endif /* CONFIG_NFS_V4_2 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) refcount_inc(&state->count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) spin_unlock(&sp->so_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) status = __nfs4_reclaim_open_state(sp, state, ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) switch (status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) if (status >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) loop = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) printk(KERN_ERR "NFS: %s: unhandled error %d\n", __func__, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) case -ENOENT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) case -ENOMEM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) case -EACCES:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) case -EROFS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) case -EIO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) case -ESTALE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) /* Open state on this file cannot be recovered */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) nfs4_state_mark_recovery_failed(state, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) case -EAGAIN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) ssleep(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) if (loop++ < 10) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) set_bit(ops->state_flag_bit, &state->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) case -NFS4ERR_ADMIN_REVOKED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) case -NFS4ERR_STALE_STATEID:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) case -NFS4ERR_OLD_STATEID:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) case -NFS4ERR_BAD_STATEID:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) case -NFS4ERR_RECLAIM_BAD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) case -NFS4ERR_RECLAIM_CONFLICT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) nfs4_state_mark_reclaim_nograce(sp->so_server->nfs_client, state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) case -NFS4ERR_EXPIRED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) case -NFS4ERR_NO_GRACE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) nfs4_state_mark_reclaim_nograce(sp->so_server->nfs_client, state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) case -NFS4ERR_STALE_CLIENTID:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) case -NFS4ERR_BADSESSION:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) case -NFS4ERR_BADSLOT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) case -NFS4ERR_BAD_HIGH_SLOT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) case -ETIMEDOUT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) goto out_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) nfs4_put_open_state(state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) spin_lock(&sp->so_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) goto restart;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) raw_write_seqcount_end(&sp->so_reclaim_seqcount);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) spin_unlock(&sp->so_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) #ifdef CONFIG_NFS_V4_2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) if (found_ssc_copy_state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) #endif /* CONFIG_NFS_V4_2 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) out_err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) nfs4_put_open_state(state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) spin_lock(&sp->so_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) raw_write_seqcount_end(&sp->so_reclaim_seqcount);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) spin_unlock(&sp->so_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725)
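/* Clear the open and lock state flags before the stateids are re-established. */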
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) static void nfs4_clear_open_state(struct nfs4_state *state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) struct nfs4_lock_state *lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) clear_bit(NFS_DELEGATED_STATE, &state->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) clear_bit(NFS_O_RDONLY_STATE, &state->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) clear_bit(NFS_O_WRONLY_STATE, &state->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) clear_bit(NFS_O_RDWR_STATE, &state->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) spin_lock(&state->state_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) list_for_each_entry(lock, &state->lock_states, ls_locks) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) lock->ls_seqid.flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) clear_bit(NFS_LOCK_INITIALIZED, &lock->ls_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) spin_unlock(&state->state_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741)
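/*
 * Reset the state-owner seqid flags on @server and mark each of its open
 * states for reclaim via the supplied callback.
 */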
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) static void nfs4_reset_seqids(struct nfs_server *server,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) int (*mark_reclaim)(struct nfs_client *clp, struct nfs4_state *state))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) struct nfs_client *clp = server->nfs_client;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) struct nfs4_state_owner *sp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) struct rb_node *pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) struct nfs4_state *state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) spin_lock(&clp->cl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) for (pos = rb_first(&server->state_owners);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) pos != NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) pos = rb_next(pos)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) sp = rb_entry(pos, struct nfs4_state_owner, so_server_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) sp->so_seqid.flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) spin_lock(&sp->so_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) list_for_each_entry(state, &sp->so_states, open_states) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) if (mark_reclaim(clp, state))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) nfs4_clear_open_state(state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) spin_unlock(&sp->so_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) spin_unlock(&clp->cl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) static void nfs4_state_mark_reclaim_helper(struct nfs_client *clp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) int (*mark_reclaim)(struct nfs_client *clp, struct nfs4_state *state))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) struct nfs_server *server;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) nfs4_reset_seqids(server, mark_reclaim);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) static void nfs4_state_start_reclaim_reboot(struct nfs_client *clp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) /* Mark all delegations for reclaim */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) nfs_delegation_mark_reclaim(clp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) nfs4_state_mark_reclaim_helper(clp, nfs4_state_mark_reclaim_reboot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) static int nfs4_reclaim_complete(struct nfs_client *clp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) const struct nfs4_state_recovery_ops *ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) const struct cred *cred)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) /* Notify the server we're done reclaiming our state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) if (ops->reclaim_complete)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) return ops->reclaim_complete(clp, cred);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) static void nfs4_clear_reclaim_server(struct nfs_server *server)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) struct nfs_client *clp = server->nfs_client;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) struct nfs4_state_owner *sp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) struct rb_node *pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) struct nfs4_state *state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) spin_lock(&clp->cl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) for (pos = rb_first(&server->state_owners);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) pos != NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) pos = rb_next(pos)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) sp = rb_entry(pos, struct nfs4_state_owner, so_server_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) spin_lock(&sp->so_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) list_for_each_entry(state, &sp->so_states, open_states) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) if (!test_and_clear_bit(NFS_STATE_RECLAIM_REBOOT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) &state->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) nfs4_state_mark_reclaim_nograce(clp, state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) spin_unlock(&sp->so_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) spin_unlock(&clp->cl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) static int nfs4_state_clear_reclaim_reboot(struct nfs_client *clp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) struct nfs_server *server;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) if (!test_and_clear_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) nfs4_clear_reclaim_server(server);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) nfs_delegation_reap_unclaimed(clp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) static void nfs4_state_end_reclaim_reboot(struct nfs_client *clp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) const struct nfs4_state_recovery_ops *ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) const struct cred *cred;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) if (!nfs4_state_clear_reclaim_reboot(clp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) ops = clp->cl_mvops->reboot_recovery_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) cred = nfs4_get_clid_cred(clp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) err = nfs4_reclaim_complete(clp, ops, cred);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) put_cred(cred);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) if (err == -NFS4ERR_CONN_NOT_BOUND_TO_SESSION)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) set_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) static void nfs4_state_start_reclaim_nograce(struct nfs_client *clp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) nfs_mark_test_expired_all_delegations(clp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) nfs4_state_mark_reclaim_helper(clp, nfs4_state_mark_reclaim_nograce);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) static int nfs4_recovery_handle_error(struct nfs_client *clp, int error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) switch (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) case -NFS4ERR_CB_PATH_DOWN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) nfs40_handle_cb_pathdown(clp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) case -NFS4ERR_NO_GRACE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) nfs4_state_end_reclaim_reboot(clp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) case -NFS4ERR_STALE_CLIENTID:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) nfs4_state_start_reclaim_reboot(clp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) case -NFS4ERR_EXPIRED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) nfs4_state_start_reclaim_nograce(clp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) case -NFS4ERR_BADSESSION:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) case -NFS4ERR_BADSLOT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) case -NFS4ERR_BAD_HIGH_SLOT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) case -NFS4ERR_DEADSESSION:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) case -NFS4ERR_SEQ_FALSE_RETRY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) case -NFS4ERR_SEQ_MISORDERED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) /* Zero session reset errors */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) set_bit(NFS4CLNT_BIND_CONN_TO_SESSION, &clp->cl_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) dprintk("%s: failed to handle error %d for server %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) __func__, error, clp->cl_hostname);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) dprintk("%s: handled error %d for server %s\n", __func__, error,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) clp->cl_hostname);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896)
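/*
 * Walk every state owner known to the client and reclaim its open state,
 * restarting the scan whenever the locks had to be dropped.
 */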
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) static int nfs4_do_reclaim(struct nfs_client *clp, const struct nfs4_state_recovery_ops *ops)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) struct nfs4_state_owner *sp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) struct nfs_server *server;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) struct rb_node *pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) LIST_HEAD(freeme);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) int status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) restart:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) nfs4_purge_state_owners(server, &freeme);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) spin_lock(&clp->cl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) for (pos = rb_first(&server->state_owners);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) pos != NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) pos = rb_next(pos)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) sp = rb_entry(pos,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) struct nfs4_state_owner, so_server_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) if (!test_and_clear_bit(ops->owner_flag_bit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) &sp->so_flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) if (!atomic_inc_not_zero(&sp->so_count))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) spin_unlock(&clp->cl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) status = nfs4_reclaim_open_state(sp, ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) if (status < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) set_bit(ops->owner_flag_bit, &sp->so_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) nfs4_put_state_owner(sp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) status = nfs4_recovery_handle_error(clp, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) return (status != 0) ? status : -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) nfs4_put_state_owner(sp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) goto restart;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) spin_unlock(&clp->cl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) nfs4_free_state_owners(&freeme);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940)
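/*
 * Send a lease renewal to verify that the client's lease is still valid
 * on the server.
 */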
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) static int nfs4_check_lease(struct nfs_client *clp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) const struct cred *cred;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) const struct nfs4_state_maintenance_ops *ops =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) clp->cl_mvops->state_renewal_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) int status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) /* Is the client already known to have an expired lease? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) cred = ops->get_state_renewal_cred(clp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) if (cred == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) cred = nfs4_get_clid_cred(clp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) status = -ENOKEY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) if (cred == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) status = ops->renew_lease(clp, cred);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) put_cred(cred);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) if (status == -ETIMEDOUT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) set_bit(NFS4CLNT_CHECK_LEASE, &clp->cl_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) return nfs4_recovery_handle_error(clp, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) /* Set NFS4CLNT_LEASE_EXPIRED and reclaim reboot state for all v4.0 errors
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) * and for recoverable errors on EXCHANGE_ID for v4.1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) static int nfs4_handle_reclaim_lease_error(struct nfs_client *clp, int status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) switch (status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) case -NFS4ERR_SEQ_MISORDERED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) if (test_and_set_bit(NFS4CLNT_PURGE_STATE, &clp->cl_state))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) return -ESERVERFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) /* Lease confirmation error: retry after purging the lease */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) ssleep(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) clear_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) case -NFS4ERR_STALE_CLIENTID:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) clear_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) nfs4_state_start_reclaim_reboot(clp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) case -NFS4ERR_CLID_INUSE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) pr_err("NFS: Server %s reports our clientid is in use\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) clp->cl_hostname);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) nfs_mark_client_ready(clp, -EPERM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) clear_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) case -EACCES:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) case -NFS4ERR_DELAY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) case -EAGAIN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) ssleep(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) case -NFS4ERR_MINOR_VERS_MISMATCH:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) if (clp->cl_cons_state == NFS_CS_SESSION_INITING)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) nfs_mark_client_ready(clp, -EPROTONOSUPPORT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) dprintk("%s: exit with error %d for server %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) __func__, -EPROTONOSUPPORT, clp->cl_hostname);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) return -EPROTONOSUPPORT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) case -NFS4ERR_NOT_SAME: /* FixMe: implement recovery
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) * in nfs4_exchange_id */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) dprintk("%s: exit with error %d for server %s\n", __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) status, clp->cl_hostname);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) dprintk("%s: handled error %d for server %s\n", __func__, status,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) clp->cl_hostname);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015)
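/* Drain the session, then establish a fresh client ID with the server. */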
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) static int nfs4_establish_lease(struct nfs_client *clp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) const struct cred *cred;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) const struct nfs4_state_recovery_ops *ops =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) clp->cl_mvops->reboot_recovery_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) int status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) status = nfs4_begin_drain_session(clp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) if (status != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) cred = nfs4_get_clid_cred(clp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) if (cred == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) status = ops->establish_clid(clp, cred);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) put_cred(cred);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) if (status != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) pnfs_destroy_all_layouts(clp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) * Returns zero or a negative errno. NFS4ERR values are converted
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) * to local errno values.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) static int nfs4_reclaim_lease(struct nfs_client *clp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) int status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) status = nfs4_establish_lease(clp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) if (status < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) return nfs4_handle_reclaim_lease_error(clp, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) if (test_and_clear_bit(NFS4CLNT_SERVER_SCOPE_MISMATCH, &clp->cl_state))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) nfs4_state_start_reclaim_nograce(clp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) if (!test_bit(NFS4CLNT_RECLAIM_NOGRACE, &clp->cl_state))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) set_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) clear_bit(NFS4CLNT_CHECK_LEASE, &clp->cl_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) clear_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056)
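/*
 * Establish a new lease, then recover the remaining state without relying
 * on a grace period.
 */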
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) static int nfs4_purge_lease(struct nfs_client *clp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) int status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) status = nfs4_establish_lease(clp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) if (status < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) return nfs4_handle_reclaim_lease_error(clp, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) clear_bit(NFS4CLNT_PURGE_STATE, &clp->cl_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) nfs4_state_start_reclaim_nograce(clp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) * Try remote migration of one FSID from a source server to a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) * destination server. The source server provides a list of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) * potential destinations.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) * Returns zero or a negative NFS4ERR status code.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) static int nfs4_try_migration(struct nfs_server *server, const struct cred *cred)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) struct nfs_client *clp = server->nfs_client;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) struct nfs4_fs_locations *locations = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) struct inode *inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) struct page *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) int status, result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) dprintk("--> %s: FSID %llx:%llx on \"%s\"\n", __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) (unsigned long long)server->fsid.major,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) (unsigned long long)server->fsid.minor,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) clp->cl_hostname);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) result = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) page = alloc_page(GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) locations = kmalloc(sizeof(struct nfs4_fs_locations), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) if (page == NULL || locations == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) dprintk("<-- %s: no memory\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) inode = d_inode(server->super->s_root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) result = nfs4_proc_get_locations(inode, locations, page, cred);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) if (result) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) dprintk("<-- %s: failed to retrieve fs_locations: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) __func__, result);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) result = -NFS4ERR_NXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) if (!locations->nlocations)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) if (!(locations->fattr.valid & NFS_ATTR_FATTR_V4_LOCATIONS)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) dprintk("<-- %s: No fs_locations data, migration skipped\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) status = nfs4_begin_drain_session(clp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) if (status != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) result = status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) status = nfs4_replace_transport(server, locations);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) if (status != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) dprintk("<-- %s: failed to replace transport: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) __func__, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) result = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) dprintk("<-- %s: migration succeeded\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) if (page != NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) __free_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) kfree(locations);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) if (result) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) pr_err("NFS: migration recovery failed (server %s)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) clp->cl_hostname);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) set_bit(NFS_MIG_FAILED, &server->mig_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) return result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) * Returns zero or a negative NFS4ERR status code.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) static int nfs4_handle_migration(struct nfs_client *clp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) const struct nfs4_state_maintenance_ops *ops =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) clp->cl_mvops->state_renewal_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) struct nfs_server *server;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) const struct cred *cred;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) dprintk("%s: migration reported on \"%s\"\n", __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) clp->cl_hostname);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) cred = ops->get_state_renewal_cred(clp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) if (cred == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) return -NFS4ERR_NOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) clp->cl_mig_gen++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) restart:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) int status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) if (server->mig_gen == clp->cl_mig_gen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) server->mig_gen = clp->cl_mig_gen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) if (!test_and_clear_bit(NFS_MIG_IN_TRANSITION,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) &server->mig_status))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) status = nfs4_try_migration(server, cred);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) if (status < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) put_cred(cred);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) goto restart;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) put_cred(cred);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) * Test each nfs_server on the clp's cl_superblocks list to see
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) * if it's moved to another server. Stop when the server no longer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) * returns NFS4ERR_LEASE_MOVED.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) static int nfs4_handle_lease_moved(struct nfs_client *clp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) const struct nfs4_state_maintenance_ops *ops =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) clp->cl_mvops->state_renewal_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) struct nfs_server *server;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) const struct cred *cred;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) dprintk("%s: lease moved reported on \"%s\"\n", __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) clp->cl_hostname);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) cred = ops->get_state_renewal_cred(clp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) if (cred == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) return -NFS4ERR_NOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) clp->cl_mig_gen++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) restart:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) struct inode *inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) int status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) if (server->mig_gen == clp->cl_mig_gen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) server->mig_gen = clp->cl_mig_gen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) inode = d_inode(server->super->s_root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) status = nfs4_proc_fsid_present(inode, cred);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) if (status != -NFS4ERR_MOVED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) goto restart; /* wasn't this one */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) if (nfs4_try_migration(server, cred) == -NFS4ERR_LEASE_MOVED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) goto restart; /* there are more */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) put_cred(cred);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) * nfs4_discover_server_trunking - Detect server IP address trunking
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) * @clp: nfs_client under test
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) * @result: OUT: found nfs_client, or clp
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) * Returns zero or a negative errno. If zero is returned,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) * an nfs_client pointer is planted in "result".
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) * Note: since we are invoked in process context, and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) * not from inside the state manager, we cannot use
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) * nfs4_handle_reclaim_lease_error().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) int nfs4_discover_server_trunking(struct nfs_client *clp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) struct nfs_client **result)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) const struct nfs4_state_recovery_ops *ops =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) clp->cl_mvops->reboot_recovery_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) struct rpc_clnt *clnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) const struct cred *cred;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) int i, status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) dprintk("NFS: %s: testing '%s'\n", __func__, clp->cl_hostname);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) clnt = clp->cl_rpcclient;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) mutex_lock(&nfs_clid_init_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) again:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) status = -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) cred = nfs4_get_clid_cred(clp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) if (cred == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) status = ops->detect_trunking(clp, result, cred);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) put_cred(cred);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) switch (status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) case -EINTR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) case -ERESTARTSYS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) case -ETIMEDOUT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) if (clnt->cl_softrtry)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) case -NFS4ERR_DELAY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) case -EAGAIN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) ssleep(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) case -NFS4ERR_STALE_CLIENTID:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) dprintk("NFS: %s after status %d, retrying\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) __func__, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) goto again;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) case -EACCES:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) if (i++ == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) nfs4_root_machine_cred(clp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) goto again;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) if (clnt->cl_auth->au_flavor == RPC_AUTH_UNIX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) case -NFS4ERR_CLID_INUSE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) case -NFS4ERR_WRONGSEC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) /* No point in retrying if we already used RPC_AUTH_UNIX */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) if (clnt->cl_auth->au_flavor == RPC_AUTH_UNIX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) status = -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) clnt = rpc_clone_client_set_auth(clnt, RPC_AUTH_UNIX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) if (IS_ERR(clnt)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) status = PTR_ERR(clnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) /* Note: this is safe because we haven't yet marked the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) * client as ready, so we are the only user of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) * clp->cl_rpcclient
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) clnt = xchg(&clp->cl_rpcclient, clnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) rpc_shutdown_client(clnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) clnt = clp->cl_rpcclient;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) goto again;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) case -NFS4ERR_MINOR_VERS_MISMATCH:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) status = -EPROTONOSUPPORT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) case -EKEYEXPIRED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) case -NFS4ERR_NOT_SAME: /* FixMe: implement recovery
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) * in nfs4_exchange_id */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) status = -EKEYEXPIRED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) pr_warn("NFS: %s unhandled error %d. Exiting with error EIO\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) __func__, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) status = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) out_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) mutex_unlock(&nfs_clid_init_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) dprintk("NFS: %s: status = %d\n", __func__, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) #ifdef CONFIG_NFS_V4_1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) void nfs4_schedule_session_recovery(struct nfs4_session *session, int err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) struct nfs_client *clp = session->clp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) switch (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) set_bit(NFS4CLNT_BIND_CONN_TO_SESSION, &clp->cl_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) nfs4_schedule_state_manager(clp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) EXPORT_SYMBOL_GPL(nfs4_schedule_session_recovery);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) void nfs41_notify_server(struct nfs_client *clp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) /* Use CHECK_LEASE to ping the server with a SEQUENCE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) set_bit(NFS4CLNT_CHECK_LEASE, &clp->cl_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) nfs4_schedule_state_manager(clp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) static void nfs4_reset_all_state(struct nfs_client *clp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) if (test_and_set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) set_bit(NFS4CLNT_PURGE_STATE, &clp->cl_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) clear_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) nfs4_state_start_reclaim_nograce(clp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) dprintk("%s: scheduling reset of all state for server %s!\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) __func__, clp->cl_hostname);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) nfs4_schedule_state_manager(clp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) static void nfs41_handle_server_reboot(struct nfs_client *clp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) if (test_and_set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) nfs4_state_start_reclaim_reboot(clp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) dprintk("%s: server %s rebooted!\n", __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) clp->cl_hostname);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) nfs4_schedule_state_manager(clp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382)
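/* All of the client's state was revoked: purge it and rebuild from scratch. */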
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) static void nfs41_handle_all_state_revoked(struct nfs_client *clp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) nfs4_reset_all_state(clp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) dprintk("%s: state revoked on server %s\n", __func__, clp->cl_hostname);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388)
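/* Only some state was revoked: reclaim it without a grace period. */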
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) static void nfs41_handle_some_state_revoked(struct nfs_client *clp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) nfs4_state_start_reclaim_nograce(clp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) nfs4_schedule_state_manager(clp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) dprintk("%s: state revoked on server %s\n", __func__, clp->cl_hostname);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396)
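/* Recallable state (layouts, delegations) was revoked by the server. */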
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) static void nfs41_handle_recallable_state_revoked(struct nfs_client *clp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) /* FIXME: For now, we destroy all layouts. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) pnfs_destroy_all_layouts(clp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) nfs_test_expired_all_delegations(clp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) dprintk("%s: Recallable state revoked on server %s!\n", __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) clp->cl_hostname);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405)
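/* The server reported a backchannel fault: recover by resetting the session. */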
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) static void nfs41_handle_backchannel_fault(struct nfs_client *clp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) nfs4_schedule_state_manager(clp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) dprintk("%s: server %s declared a backchannel fault\n", __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) clp->cl_hostname);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414)
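/* The callback path is down: schedule a BIND_CONN_TO_SESSION to restore it. */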
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) static void nfs41_handle_cb_path_down(struct nfs_client *clp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) if (test_and_set_bit(NFS4CLNT_BIND_CONN_TO_SESSION,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) &clp->cl_state) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) nfs4_schedule_state_manager(clp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421)
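/*
 * Map the SEQ4_STATUS flags returned in a SEQUENCE reply onto the
 * recovery actions above:
 *
 *   RESTART_RECLAIM_NEEDED                -> reboot reclaim
 *   EXPIRED_ALL_STATE_REVOKED             -> purge state, nograce reclaim
 *   EXPIRED_SOME_STATE_REVOKED,
 *   ADMIN_STATE_REVOKED                   -> nograce reclaim
 *   LEASE_MOVED                           -> lease migration recovery
 *   RECALLABLE_STATE_REVOKED              -> drop layouts, test delegations
 *   BACKCHANNEL_FAULT                     -> session reset
 *   CB_PATH_DOWN, CB_PATH_DOWN_SESSION    -> BIND_CONN_TO_SESSION
 *
 * When called from the state manager itself (@recovery is true), only the
 * backchannel and callback-path flags are acted upon here.
 */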
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) void nfs41_handle_sequence_flag_errors(struct nfs_client *clp, u32 flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) bool recovery)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) if (!flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) dprintk("%s: \"%s\" (client ID %llx) flags=0x%08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) __func__, clp->cl_hostname, clp->cl_clientid, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) * If we're called from the state manager thread, then assume we're
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) * already handling the RECLAIM_NEEDED and/or STATE_REVOKED sequence flags.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) * Those flags are expected to remain set until we're done
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) * recovering (see RFC5661, section 18.46.3).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) if (recovery)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437) goto out_recovery;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) if (flags & SEQ4_STATUS_RESTART_RECLAIM_NEEDED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) nfs41_handle_server_reboot(clp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) if (flags & SEQ4_STATUS_EXPIRED_ALL_STATE_REVOKED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) nfs41_handle_all_state_revoked(clp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) if (flags & (SEQ4_STATUS_EXPIRED_SOME_STATE_REVOKED |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) SEQ4_STATUS_ADMIN_STATE_REVOKED))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) nfs41_handle_some_state_revoked(clp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) if (flags & SEQ4_STATUS_LEASE_MOVED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) nfs4_schedule_lease_moved_recovery(clp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) if (flags & SEQ4_STATUS_RECALLABLE_STATE_REVOKED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) nfs41_handle_recallable_state_revoked(clp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) out_recovery:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) if (flags & SEQ4_STATUS_BACKCHANNEL_FAULT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) nfs41_handle_backchannel_fault(clp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) else if (flags & (SEQ4_STATUS_CB_PATH_DOWN |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) SEQ4_STATUS_CB_PATH_DOWN_SESSION))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) nfs41_handle_cb_path_down(clp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457)
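/*
 * Drain the current session, destroy it on the server, clear the old
 * session ID and create a replacement session. -NFS4ERR_DELAY and
 * -NFS4ERR_BACK_CHAN_BUSY from DESTROY_SESSION re-arm
 * NFS4CLNT_SESSION_RESET so the reset is retried after a short sleep.
 */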
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) static int nfs4_reset_session(struct nfs_client *clp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) const struct cred *cred;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) int status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) if (!nfs4_has_session(clp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) status = nfs4_begin_drain_session(clp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) if (status != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) cred = nfs4_get_clid_cred(clp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) status = nfs4_proc_destroy_session(clp->cl_session, cred);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) switch (status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) case -NFS4ERR_BADSESSION:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) case -NFS4ERR_DEADSESSION:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) case -NFS4ERR_BACK_CHAN_BUSY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) case -NFS4ERR_DELAY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) ssleep(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) status = nfs4_recovery_handle_error(clp, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) memset(clp->cl_session->sess_id.data, 0, NFS4_MAX_SESSIONID_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) status = nfs4_proc_create_session(clp, cred);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) if (status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) dprintk("%s: session reset failed with status %d for server %s!\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) __func__, status, clp->cl_hostname);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) status = nfs4_handle_reclaim_lease_error(clp, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) nfs41_finish_session_reset(clp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) dprintk("%s: session reset was successful for server %s!\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) __func__, clp->cl_hostname);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) put_cred(cred);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501)
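/*
 * Re-bind the transport to the existing session with BIND_CONN_TO_SESSION.
 * -NFS4ERR_DELAY re-arms the NFS4CLNT_BIND_CONN_TO_SESSION bit after a
 * short sleep so that the state manager retries.
 */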
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) static int nfs4_bind_conn_to_session(struct nfs_client *clp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) const struct cred *cred;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) if (!nfs4_has_session(clp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509) ret = nfs4_begin_drain_session(clp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) if (ret != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) cred = nfs4_get_clid_cred(clp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) ret = nfs4_proc_bind_conn_to_session(clp, cred);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) put_cred(cred);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) clear_bit(NFS4CLNT_BIND_CONN_TO_SESSION, &clp->cl_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) switch (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) dprintk("%s: bind_conn_to_session was successful for server %s!\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) __func__, clp->cl_hostname);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) case -NFS4ERR_DELAY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) ssleep(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) set_bit(NFS4CLNT_BIND_CONN_TO_SESSION, &clp->cl_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) return nfs4_recovery_handle_error(clp, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530)
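/* Return unused layouts for whichever iomode(s) were flagged for recall. */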
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) static void nfs4_layoutreturn_any_run(struct nfs_client *clp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) int iomode = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) if (test_and_clear_bit(NFS4CLNT_RECALL_ANY_LAYOUT_READ, &clp->cl_state))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) iomode += IOMODE_READ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) if (test_and_clear_bit(NFS4CLNT_RECALL_ANY_LAYOUT_RW, &clp->cl_state))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538) iomode += IOMODE_RW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539) /* Note: IOMODE_READ + IOMODE_RW == IOMODE_ANY */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) if (iomode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) pnfs_layout_return_unused_byclid(clp, iomode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) set_bit(NFS4CLNT_RUN_MANAGER, &clp->cl_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545) #else /* CONFIG_NFS_V4_1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) static int nfs4_reset_session(struct nfs_client *clp) { return 0; }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) static int nfs4_bind_conn_to_session(struct nfs_client *clp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) static void nfs4_layoutreturn_any_run(struct nfs_client *clp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) #endif /* CONFIG_NFS_V4_1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557)
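/*
 * Main recovery loop. The cl_state bits are processed in priority order
 * (purge, lease, session and connection recovery, then the reclaims and
 * delegation/layout returns); most steps jump back to the top of the loop
 * so that newly raised, higher-priority work is seen first. The loop exits
 * when no more work is queued, when another manager instance has taken
 * over, or when the client is being torn down.
 */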
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) static void nfs4_state_manager(struct nfs_client *clp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) int status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) const char *section = "", *section_sep = "";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) /* Ensure exclusive access to NFSv4 state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) trace_nfs4_state_mgr(clp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566) clear_bit(NFS4CLNT_RUN_MANAGER, &clp->cl_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) if (test_bit(NFS4CLNT_PURGE_STATE, &clp->cl_state)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) section = "purge state";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) status = nfs4_purge_lease(clp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570) if (status < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) goto out_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575) if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) section = "lease expired";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577) /* We're going to have to re-establish a clientid */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) status = nfs4_reclaim_lease(clp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) if (status < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580) goto out_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584) /* Initialize or reset the session */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) if (test_and_clear_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586) section = "reset session";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) status = nfs4_reset_session(clp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588) if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590) if (status < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) goto out_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594) /* Send BIND_CONN_TO_SESSION */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595) if (test_and_clear_bit(NFS4CLNT_BIND_CONN_TO_SESSION,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596) &clp->cl_state)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597) section = "bind conn to session";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) status = nfs4_bind_conn_to_session(clp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) if (status < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) goto out_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604) if (test_and_clear_bit(NFS4CLNT_CHECK_LEASE, &clp->cl_state)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) section = "check lease";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606) status = nfs4_check_lease(clp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607) if (status < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608) goto out_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612) if (test_and_clear_bit(NFS4CLNT_MOVED, &clp->cl_state)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613) section = "migration";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614) status = nfs4_handle_migration(clp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615) if (status < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) goto out_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619) if (test_and_clear_bit(NFS4CLNT_LEASE_MOVED, &clp->cl_state)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620) section = "lease moved";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621) status = nfs4_handle_lease_moved(clp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622) if (status < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623) goto out_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626) /* First recover reboot state... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627) if (test_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628) section = "reclaim reboot";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629) status = nfs4_do_reclaim(clp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630) clp->cl_mvops->reboot_recovery_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631) if (status == -EAGAIN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633) if (status < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634) goto out_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635) nfs4_state_end_reclaim_reboot(clp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) /* Detect expired delegations... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639) if (test_and_clear_bit(NFS4CLNT_DELEGATION_EXPIRED, &clp->cl_state)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) section = "detect expired delegations";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641) nfs_reap_expired_delegations(clp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645) /* Now recover expired state... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646) if (test_bit(NFS4CLNT_RECLAIM_NOGRACE, &clp->cl_state)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) section = "reclaim nograce";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648) status = nfs4_do_reclaim(clp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649) clp->cl_mvops->nograce_recovery_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650) if (status == -EAGAIN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652) if (status < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653) goto out_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654) clear_bit(NFS4CLNT_RECLAIM_NOGRACE, &clp->cl_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657) nfs4_end_drain_session(clp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658) nfs4_clear_state_manager_bit(clp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660) if (!test_and_set_bit(NFS4CLNT_RECALL_RUNNING, &clp->cl_state)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661) if (test_and_clear_bit(NFS4CLNT_DELEGRETURN, &clp->cl_state)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662) nfs_client_return_marked_delegations(clp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663) set_bit(NFS4CLNT_RUN_MANAGER, &clp->cl_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665) nfs4_layoutreturn_any_run(clp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666) clear_bit(NFS4CLNT_RECALL_RUNNING, &clp->cl_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669) /* Did we race with an attempt to give us more work? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670) if (!test_bit(NFS4CLNT_RUN_MANAGER, &clp->cl_state))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672) if (test_and_set_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674) } while (refcount_read(&clp->cl_count) > 1 && !signalled());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675) goto out_drain;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677) out_error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678) if (strlen(section))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679) section_sep = ": ";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680) trace_nfs4_state_mgr_failed(clp, section, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681) pr_warn_ratelimited("NFS: state manager%s%s failed on NFSv4 server %s"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682) " with error %d\n", section_sep, section,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683) clp->cl_hostname, -status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684) ssleep(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685) out_drain:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686) nfs4_end_drain_session(clp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687) nfs4_clear_state_manager_bit(clp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689)
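/*
 * kthread entry point: allow SIGKILL so the thread can be interrupted,
 * run the state manager to completion, then drop the client and module
 * references held on behalf of this thread.
 */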
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690) static int nfs4_run_state_manager(void *ptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692) struct nfs_client *clp = ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694) allow_signal(SIGKILL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695) nfs4_state_manager(clp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696) nfs_put_client(clp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697) module_put_and_exit(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702) * Local variables:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703) * c-basic-offset: 8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704) * End:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705) */