// SPDX-License-Identifier: GPL-2.0
/*
 * linux/fs/nfs/callback_proc.c
 *
 * Copyright (C) 2004 Trond Myklebust
 *
 * NFSv4 callback procedures
 */
#include <linux/nfs4.h>
#include <linux/nfs_fs.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>
#include "nfs4_fs.h"
#include "callback.h"
#include "delegation.h"
#include "internal.h"
#include "pnfs.h"
#include "nfs4session.h"
#include "nfs4trace.h"

#define NFSDBG_FACILITY NFSDBG_CALLBACK

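/*
 * CB_GETATTR: report the size and change attribute of a write-delegated
 * file back to the server, using the attributes cached with the delegation.
 */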
__be32 nfs4_callback_getattr(void *argp, void *resp,
			     struct cb_process_state *cps)
{
	struct cb_getattrargs *args = argp;
	struct cb_getattrres *res = resp;
	struct nfs_delegation *delegation;
	struct inode *inode;

	res->status = htonl(NFS4ERR_OP_NOT_IN_SESSION);
	if (!cps->clp) /* Always set for v4.0. Set in cb_sequence for v4.1 */
		goto out;

	res->bitmap[0] = res->bitmap[1] = 0;
	res->status = htonl(NFS4ERR_BADHANDLE);

	dprintk_rcu("NFS: GETATTR callback request from %s\n",
		rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR));

	inode = nfs_delegation_find_inode(cps->clp, &args->fh);
	if (IS_ERR(inode)) {
		if (inode == ERR_PTR(-EAGAIN))
			res->status = htonl(NFS4ERR_DELAY);
		trace_nfs4_cb_getattr(cps->clp, &args->fh, NULL,
				      -ntohl(res->status));
		goto out;
	}
	rcu_read_lock();
	delegation = nfs4_get_valid_delegation(inode);
	if (delegation == NULL || (delegation->type & FMODE_WRITE) == 0)
		goto out_iput;
	res->size = i_size_read(inode);
	res->change_attr = delegation->change_attr;
	if (nfs_have_writebacks(inode))
		res->change_attr++;
	res->ctime = inode->i_ctime;
	res->mtime = inode->i_mtime;
	res->bitmap[0] = (FATTR4_WORD0_CHANGE|FATTR4_WORD0_SIZE) &
		args->bitmap[0];
	res->bitmap[1] = (FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY) &
		args->bitmap[1];
	res->status = 0;
out_iput:
	rcu_read_unlock();
	trace_nfs4_cb_getattr(cps->clp, &args->fh, inode, -ntohl(res->status));
	nfs_iput_and_deactive(inode);
out:
	dprintk("%s: exit with status = %d\n", __func__, ntohl(res->status));
	return res->status;
}

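/*
 * CB_RECALL: the server is recalling a delegation. Hand the actual return
 * off to an asynchronous worker so the callback reply is not delayed.
 */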
__be32 nfs4_callback_recall(void *argp, void *resp,
			    struct cb_process_state *cps)
{
	struct cb_recallargs *args = argp;
	struct inode *inode;
	__be32 res;

	res = htonl(NFS4ERR_OP_NOT_IN_SESSION);
	if (!cps->clp) /* Always set for v4.0. Set in cb_sequence for v4.1 */
		goto out;

	dprintk_rcu("NFS: RECALL callback request from %s\n",
		rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR));

	res = htonl(NFS4ERR_BADHANDLE);
	inode = nfs_delegation_find_inode(cps->clp, &args->fh);
	if (IS_ERR(inode)) {
		if (inode == ERR_PTR(-EAGAIN))
			res = htonl(NFS4ERR_DELAY);
		trace_nfs4_cb_recall(cps->clp, &args->fh, NULL,
				&args->stateid, -ntohl(res));
		goto out;
	}
	/* Set up a helper thread to actually return the delegation */
	switch (nfs_async_inode_return_delegation(inode, &args->stateid)) {
	case 0:
		res = 0;
		break;
	case -ENOENT:
		res = htonl(NFS4ERR_BAD_STATEID);
		break;
	default:
		res = htonl(NFS4ERR_RESOURCE);
	}
	trace_nfs4_cb_recall(cps->clp, &args->fh, inode,
			&args->stateid, -ntohl(res));
	nfs_iput_and_deactive(inode);
out:
	dprintk("%s: exit with status = %d\n", __func__, ntohl(res));
	return res;
}

#if defined(CONFIG_NFS_V4_1)

/*
 * Lookup a layout inode by stateid
 *
 * Note: returns a refcount on the inode and superblock
 */
static struct inode *nfs_layout_find_inode_by_stateid(struct nfs_client *clp,
		const nfs4_stateid *stateid)
	__must_hold(RCU)
{
	struct nfs_server *server;
	struct inode *inode;
	struct pnfs_layout_hdr *lo;

	rcu_read_lock();
	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
		list_for_each_entry_rcu(lo, &server->layouts, plh_layouts) {
			if (!pnfs_layout_is_valid(lo))
				continue;
			if (!nfs4_stateid_match_other(stateid, &lo->plh_stateid))
				continue;
			if (nfs_sb_active(server->super))
				inode = igrab(lo->plh_inode);
			else
				inode = ERR_PTR(-EAGAIN);
			rcu_read_unlock();
			if (inode)
				return inode;
			nfs_sb_deactive(server->super);
			return ERR_PTR(-EAGAIN);
		}
	}
	rcu_read_unlock();
	return ERR_PTR(-ENOENT);
}

/*
 * Lookup a layout inode by filehandle.
 *
 * Note: returns a refcount on the inode and superblock
 */
static struct inode *nfs_layout_find_inode_by_fh(struct nfs_client *clp,
		const struct nfs_fh *fh)
{
	struct nfs_server *server;
	struct nfs_inode *nfsi;
	struct inode *inode;
	struct pnfs_layout_hdr *lo;

	rcu_read_lock();
	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
		list_for_each_entry_rcu(lo, &server->layouts, plh_layouts) {
			nfsi = NFS_I(lo->plh_inode);
			if (nfs_compare_fh(fh, &nfsi->fh))
				continue;
			if (nfsi->layout != lo)
				continue;
			if (nfs_sb_active(server->super))
				inode = igrab(lo->plh_inode);
			else
				inode = ERR_PTR(-EAGAIN);
			rcu_read_unlock();
			if (inode)
				return inode;
			nfs_sb_deactive(server->super);
			return ERR_PTR(-EAGAIN);
		}
	}
	rcu_read_unlock();
	return ERR_PTR(-ENOENT);
}

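/*
 * Prefer a stateid match when locating the inode for a layout recall;
 * fall back to a filehandle lookup if no layout carries that stateid.
 */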
static struct inode *nfs_layout_find_inode(struct nfs_client *clp,
		const struct nfs_fh *fh,
		const nfs4_stateid *stateid)
{
	struct inode *inode;

	inode = nfs_layout_find_inode_by_stateid(clp, stateid);
	if (inode == ERR_PTR(-ENOENT))
		inode = nfs_layout_find_inode_by_fh(clp, fh);
	return inode;
}

/*
 * Enforce RFC5661 section 12.5.5.2.1. (Layout Recall and Return Sequencing)
 */
static u32 pnfs_check_callback_stateid(struct pnfs_layout_hdr *lo,
					const nfs4_stateid *new)
{
	u32 oldseq, newseq;

	/* Is the stateid not initialised? */
	if (!pnfs_layout_is_valid(lo))
		return NFS4ERR_NOMATCHING_LAYOUT;

	/* Mismatched stateid? */
	if (!nfs4_stateid_match_other(&lo->plh_stateid, new))
		return NFS4ERR_BAD_STATEID;

	newseq = be32_to_cpu(new->seqid);
	/* Are we already in a layout recall situation? */
	if (test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags) &&
	    lo->plh_return_seq != 0) {
		if (newseq < lo->plh_return_seq)
			return NFS4ERR_OLD_STATEID;
		if (newseq > lo->plh_return_seq)
			return NFS4ERR_DELAY;
		goto out;
	}

	/* Check that the stateid matches what we think it should be. */
	oldseq = be32_to_cpu(lo->plh_stateid.seqid);
	if (newseq > oldseq + 1)
		return NFS4ERR_DELAY;
	/* Crazy server! */
	if (newseq <= oldseq)
		return NFS4ERR_OLD_STATEID;
out:
	return NFS_OK;
}

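/*
 * CB_LAYOUTRECALL(FILE): commit outstanding layout state, validate the
 * recall stateid, then mark the matching layout segments for return.
 */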
static u32 initiate_file_draining(struct nfs_client *clp,
				  struct cb_layoutrecallargs *args)
{
	struct inode *ino;
	struct pnfs_layout_hdr *lo;
	u32 rv = NFS4ERR_NOMATCHING_LAYOUT;
	LIST_HEAD(free_me_list);

	ino = nfs_layout_find_inode(clp, &args->cbl_fh, &args->cbl_stateid);
	if (IS_ERR(ino)) {
		if (ino == ERR_PTR(-EAGAIN))
			rv = NFS4ERR_DELAY;
		goto out_noput;
	}

	pnfs_layoutcommit_inode(ino, false);

	spin_lock(&ino->i_lock);
	lo = NFS_I(ino)->layout;
	if (!lo) {
		spin_unlock(&ino->i_lock);
		goto out;
	}
	pnfs_get_layout_hdr(lo);
	rv = pnfs_check_callback_stateid(lo, &args->cbl_stateid);
	if (rv != NFS_OK)
		goto unlock;

	/*
	 * Enforce RFC5661 Section 12.5.5.2.1.5 (Bulk Recall and Return)
	 */
	if (test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) {
		rv = NFS4ERR_DELAY;
		goto unlock;
	}

	pnfs_set_layout_stateid(lo, &args->cbl_stateid, NULL, true);
	switch (pnfs_mark_matching_lsegs_return(lo, &free_me_list,
				&args->cbl_range,
				be32_to_cpu(args->cbl_stateid.seqid))) {
	case 0:
	case -EBUSY:
		/* There are layout segments that need to be returned */
		rv = NFS4_OK;
		break;
	case -ENOENT:
		/* Embrace your forgetfulness! */
		rv = NFS4ERR_NOMATCHING_LAYOUT;

		if (NFS_SERVER(ino)->pnfs_curr_ld->return_range) {
			NFS_SERVER(ino)->pnfs_curr_ld->return_range(lo,
				&args->cbl_range);
		}
	}
unlock:
	spin_unlock(&ino->i_lock);
	pnfs_free_lseg_list(&free_me_list);
	/* Free all lsegs that are attached to commit buckets */
	nfs_commit_inode(ino, 0);
	pnfs_put_layout_hdr(lo);
out:
	nfs_iput_and_deactive(ino);
out_noput:
	trace_nfs4_cb_layoutrecall_file(clp, &args->cbl_fh, ino,
					&args->cbl_stateid, -rv);
	return rv;
}

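/*
 * CB_LAYOUTRECALL(FSID/ALL): drop every layout for the fsid or the whole
 * client. If any layout could not be destroyed immediately, ask the server
 * to retry with NFS4ERR_DELAY; otherwise report NFS4ERR_NOMATCHING_LAYOUT.
 */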
static u32 initiate_bulk_draining(struct nfs_client *clp,
				  struct cb_layoutrecallargs *args)
{
	int stat;

	if (args->cbl_recall_type == RETURN_FSID)
		stat = pnfs_destroy_layouts_byfsid(clp, &args->cbl_fsid, true);
	else
		stat = pnfs_destroy_layouts_byclid(clp, true);
	if (stat != 0)
		return NFS4ERR_DELAY;
	return NFS4ERR_NOMATCHING_LAYOUT;
}

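/* Dispatch a layout recall to the per-file or bulk handler. */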
static u32 do_callback_layoutrecall(struct nfs_client *clp,
				    struct cb_layoutrecallargs *args)
{
	if (args->cbl_recall_type == RETURN_FILE)
		return initiate_file_draining(clp, args);
	return initiate_bulk_draining(clp, args);
}

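/*
 * CB_LAYOUTRECALL entry point. cps->clp is only set once CB_SEQUENCE has
 * identified the session, so its absence means the op is out of session.
 */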
__be32 nfs4_callback_layoutrecall(void *argp, void *resp,
				  struct cb_process_state *cps)
{
	struct cb_layoutrecallargs *args = argp;
	u32 res = NFS4ERR_OP_NOT_IN_SESSION;

	if (cps->clp)
		res = do_callback_layoutrecall(cps->clp, args);
	return cpu_to_be32(res);
}

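/* Used by CB_RECALL_ANY to drop all pNFS layouts held for this client. */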
static void pnfs_recall_all_layouts(struct nfs_client *clp)
{
	struct cb_layoutrecallargs args;

	/* Pretend we got a CB_LAYOUTRECALL(ALL) */
	memset(&args, 0, sizeof(args));
	args.cbl_recall_type = RETURN_ALL;
	/* FIXME we ignore errors, what should we do? */
	do_callback_layoutrecall(clp, &args);
}

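/*
 * CB_NOTIFY_DEVICEID: invalidate the client's cached device IDs. The layout
 * driver reference is reused across consecutive items of the same layout
 * type to avoid repeated lookups.
 */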
__be32 nfs4_callback_devicenotify(void *argp, void *resp,
				  struct cb_process_state *cps)
{
	struct cb_devicenotifyargs *args = argp;
	const struct pnfs_layoutdriver_type *ld = NULL;
	uint32_t i;
	__be32 res = 0;

	if (!cps->clp) {
		res = cpu_to_be32(NFS4ERR_OP_NOT_IN_SESSION);
		goto out;
	}

	for (i = 0; i < args->ndevs; i++) {
		struct cb_devicenotifyitem *dev = &args->devs[i];

		if (!ld || ld->id != dev->cbd_layout_type) {
			pnfs_put_layoutdriver(ld);
			ld = pnfs_find_layoutdriver(dev->cbd_layout_type);
			if (!ld)
				continue;
		}
		nfs4_delete_deviceid(ld, cps->clp, &dev->cbd_dev_id);
	}
	pnfs_put_layoutdriver(ld);
out:
	kfree(args->devs);
	return res;
}

/*
 * Validate the sequenceID sent by the server.
 * Return success if the sequenceID is one more than what we last saw on
 * this slot, accounting for wraparound. On success, the caller records the
 * new sequenceID in the slot.
 *
 * We don't yet implement a duplicate request cache, instead we set the
 * back channel ca_maxresponsesize_cached to zero. This is OK for now
 * since we only currently implement idempotent callbacks anyway.
 *
 * We have a single slot backchannel at this time, so we don't bother
 * checking the used_slots bit array on the table. The lower layer guarantees
 * a single outstanding callback request at a time.
 */
static __be32
validate_seqid(const struct nfs4_slot_table *tbl, const struct nfs4_slot *slot,
		const struct cb_sequenceargs *args)
{
	__be32 ret;

	ret = cpu_to_be32(NFS4ERR_BADSLOT);
	if (args->csa_slotid > tbl->server_highest_slotid)
		goto out_err;

	/* Replay */
	if (args->csa_sequenceid == slot->seq_nr) {
		ret = cpu_to_be32(NFS4ERR_DELAY);
		if (nfs4_test_locked_slot(tbl, slot->slot_nr))
			goto out_err;

		/* Signal process_op to set this error on next op */
		ret = cpu_to_be32(NFS4ERR_RETRY_UNCACHED_REP);
		if (args->csa_cachethis == 0)
			goto out_err;

		/* Liar! We never allowed you to set csa_cachethis != 0 */
		ret = cpu_to_be32(NFS4ERR_SEQ_FALSE_RETRY);
		goto out_err;
	}

	/* Note: wraparound relies on seq_nr being of type u32 */
	/* Misordered request */
	ret = cpu_to_be32(NFS4ERR_SEQ_MISORDERED);
	if (args->csa_sequenceid != slot->seq_nr + 1)
		goto out_err;

	return cpu_to_be32(NFS4_OK);

out_err:
	trace_nfs4_cb_seqid_err(args, ret);
	return ret;
}

/*
 * For each referring call triple, check the session's slot table for
 * a match. If the slot is in use and the sequence numbers match, the
 * client is still waiting for a response to the original request.
 */
static int referring_call_exists(struct nfs_client *clp,
				 uint32_t nrclists,
				 struct referring_call_list *rclists,
				 spinlock_t *lock)
	__releases(lock)
	__acquires(lock)
{
	int status = 0;
	int i, j;
	struct nfs4_session *session;
	struct nfs4_slot_table *tbl;
	struct referring_call_list *rclist;
	struct referring_call *ref;

	/*
	 * XXX When client trunking is implemented, this becomes
	 * a session lookup from within the loop
	 */
	session = clp->cl_session;
	tbl = &session->fc_slot_table;

	for (i = 0; i < nrclists; i++) {
		rclist = &rclists[i];
		if (memcmp(session->sess_id.data,
			   rclist->rcl_sessionid.data,
			   NFS4_MAX_SESSIONID_LEN) != 0)
			continue;

		for (j = 0; j < rclist->rcl_nrefcalls; j++) {
			ref = &rclist->rcl_refcalls[j];
			spin_unlock(lock);
			status = nfs4_slot_wait_on_seqid(tbl, ref->rc_slotid,
					ref->rc_sequenceid, HZ >> 1) < 0;
			spin_lock(lock);
			if (status)
				goto out;
		}
	}

out:
	return status;
}

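/*
 * CB_SEQUENCE: locate the session, validate the slot and sequence ID, and
 * wait for any referring calls to complete before the rest of the compound
 * is allowed to proceed.
 */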
__be32 nfs4_callback_sequence(void *argp, void *resp,
			      struct cb_process_state *cps)
{
	struct cb_sequenceargs *args = argp;
	struct cb_sequenceres *res = resp;
	struct nfs4_slot_table *tbl;
	struct nfs4_slot *slot;
	struct nfs_client *clp;
	int i;
	__be32 status = htonl(NFS4ERR_BADSESSION);

	clp = nfs4_find_client_sessionid(cps->net, args->csa_addr,
					 &args->csa_sessionid, cps->minorversion);
	if (clp == NULL)
		goto out;

	if (!(clp->cl_session->flags & SESSION4_BACK_CHAN))
		goto out;

	tbl = &clp->cl_session->bc_slot_table;

	/* Set up res before grabbing the spinlock */
	memcpy(&res->csr_sessionid, &args->csa_sessionid,
	       sizeof(res->csr_sessionid));
	res->csr_sequenceid = args->csa_sequenceid;
	res->csr_slotid = args->csa_slotid;

	spin_lock(&tbl->slot_tbl_lock);
	/* state manager is resetting the session */
	if (test_bit(NFS4_SLOT_TBL_DRAINING, &tbl->slot_tbl_state)) {
		status = htonl(NFS4ERR_DELAY);
		/* Return NFS4ERR_BADSESSION if we're draining the session
		 * in order to reset it.
		 */
		if (test_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state))
			status = htonl(NFS4ERR_BADSESSION);
		goto out_unlock;
	}

	status = htonl(NFS4ERR_BADSLOT);
	slot = nfs4_lookup_slot(tbl, args->csa_slotid);
	if (IS_ERR(slot))
		goto out_unlock;

	res->csr_highestslotid = tbl->server_highest_slotid;
	res->csr_target_highestslotid = tbl->target_highest_slotid;

	status = validate_seqid(tbl, slot, args);
	if (status)
		goto out_unlock;
	if (!nfs4_try_to_lock_slot(tbl, slot)) {
		status = htonl(NFS4ERR_DELAY);
		goto out_unlock;
	}
	cps->slot = slot;

	/* The ca_maxresponsesize_cached is 0 with no DRC */
	if (args->csa_cachethis != 0) {
		status = htonl(NFS4ERR_REP_TOO_BIG_TO_CACHE);
		goto out_unlock;
	}

	/*
	 * Check for pending referring calls. If a match is found, a
	 * related callback was received before the response to the original
	 * call.
	 */
	if (referring_call_exists(clp, args->csa_nrclists, args->csa_rclists,
				  &tbl->slot_tbl_lock) < 0) {
		status = htonl(NFS4ERR_DELAY);
		goto out_unlock;
	}

	/*
	 * RFC5661 20.9.3
	 * If CB_SEQUENCE returns an error, then the state of the slot
	 * (sequence ID, cached reply) MUST NOT change.
	 */
	slot->seq_nr = args->csa_sequenceid;
out_unlock:
	spin_unlock(&tbl->slot_tbl_lock);

out:
	cps->clp = clp; /* put in nfs4_callback_compound */
	for (i = 0; i < args->csa_nrclists; i++)
		kfree(args->csa_rclists[i].rcl_refcalls);
	kfree(args->csa_rclists);

	if (status == htonl(NFS4ERR_RETRY_UNCACHED_REP)) {
		cps->drc_status = status;
		status = 0;
	} else
		res->csr_status = status;

	trace_nfs4_cb_sequence(args, res, status);
	return status;
}

static bool
validate_bitmap_values(unsigned int mask)
{
	return (mask & ~RCA4_TYPE_MASK_ALL) == 0;
}

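/*
 * CB_RECALL_ANY: the server is low on resources. Return unused read and/or
 * write delegations, and schedule the state manager to return layouts when
 * the pNFS recall-any bits are set.
 */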
__be32 nfs4_callback_recallany(void *argp, void *resp,
			       struct cb_process_state *cps)
{
	struct cb_recallanyargs *args = argp;
	__be32 status;
	fmode_t flags = 0;
	bool schedule_manager = false;

	status = cpu_to_be32(NFS4ERR_OP_NOT_IN_SESSION);
	if (!cps->clp) /* set in cb_sequence */
		goto out;

	dprintk_rcu("NFS: RECALL_ANY callback request from %s\n",
		rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR));

	status = cpu_to_be32(NFS4ERR_INVAL);
	if (!validate_bitmap_values(args->craa_type_mask))
		goto out;

	status = cpu_to_be32(NFS4_OK);
	if (args->craa_type_mask & BIT(RCA4_TYPE_MASK_RDATA_DLG))
		flags = FMODE_READ;
	if (args->craa_type_mask & BIT(RCA4_TYPE_MASK_WDATA_DLG))
		flags |= FMODE_WRITE;
	if (flags)
		nfs_expire_unused_delegation_types(cps->clp, flags);

	if (args->craa_type_mask & BIT(RCA4_TYPE_MASK_FILE_LAYOUT))
		pnfs_recall_all_layouts(cps->clp);

	if (args->craa_type_mask & BIT(PNFS_FF_RCA4_TYPE_MASK_READ)) {
		set_bit(NFS4CLNT_RECALL_ANY_LAYOUT_READ, &cps->clp->cl_state);
		schedule_manager = true;
	}
	if (args->craa_type_mask & BIT(PNFS_FF_RCA4_TYPE_MASK_RW)) {
		set_bit(NFS4CLNT_RECALL_ANY_LAYOUT_RW, &cps->clp->cl_state);
		schedule_manager = true;
	}
	if (schedule_manager)
		nfs4_schedule_state_manager(cps->clp);

out:
	dprintk("%s: exit with status = %d\n", __func__, ntohl(status));
	return status;
}

/* Reduce the fore channel's max_slots to the target value */
__be32 nfs4_callback_recallslot(void *argp, void *resp,
				struct cb_process_state *cps)
{
	struct cb_recallslotargs *args = argp;
	struct nfs4_slot_table *fc_tbl;
	__be32 status;

	status = htonl(NFS4ERR_OP_NOT_IN_SESSION);
	if (!cps->clp) /* set in cb_sequence */
		goto out;

	dprintk_rcu("NFS: CB_RECALL_SLOT request from %s target highest slotid %u\n",
		rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR),
		args->crsa_target_highest_slotid);

	fc_tbl = &cps->clp->cl_session->fc_slot_table;

	status = htonl(NFS4_OK);

	nfs41_set_target_slotid(fc_tbl, args->crsa_target_highest_slotid);
	nfs41_notify_server(cps->clp);
out:
	dprintk("%s: exit with status = %d\n", __func__, ntohl(status));
	return status;
}

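/*
 * CB_NOTIFY_LOCK: a contended lock has become available. Wake up any
 * waiters queued on the client's lock wait queue so they can retry.
 */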
__be32 nfs4_callback_notify_lock(void *argp, void *resp,
				 struct cb_process_state *cps)
{
	struct cb_notify_lock_args *args = argp;

	if (!cps->clp) /* set in cb_sequence */
		return htonl(NFS4ERR_OP_NOT_IN_SESSION);

	dprintk_rcu("NFS: CB_NOTIFY_LOCK request from %s\n",
		rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR));

	/* Don't wake anybody if the string looked bogus */
	if (args->cbnl_valid)
		__wake_up(&cps->clp->cl_lock_waitq, TASK_NORMAL, 0, args);

	return htonl(NFS4_OK);
}
#endif /* CONFIG_NFS_V4_1 */
#ifdef CONFIG_NFS_V4_2
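/*
 * Copy the CB_OFFLOAD result (count, error, write verifier) into the
 * copy state that the waiting COPY request will consume.
 */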
static void nfs4_copy_cb_args(struct nfs4_copy_state *cp_state,
			      struct cb_offloadargs *args)
{
	cp_state->count = args->wr_count;
	cp_state->error = args->error;
	if (!args->error) {
		cp_state->verf.committed = args->wr_writeverf.committed;
		memcpy(&cp_state->verf.verifier.data[0],
			&args->wr_writeverf.verifier.data[0],
			NFS4_VERIFIER_SIZE);
	}
}

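/*
 * CB_OFFLOAD: an asynchronous COPY has completed. Match the stateid against
 * a pending copy and complete it; if no copy is waiting yet, park the result
 * on the client's pending_cb_stateids list for the copy to pick up later.
 */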
__be32 nfs4_callback_offload(void *data, void *dummy,
			     struct cb_process_state *cps)
{
	struct cb_offloadargs *args = data;
	struct nfs_server *server;
	struct nfs4_copy_state *copy, *tmp_copy;
	bool found = false;

	copy = kzalloc(sizeof(struct nfs4_copy_state), GFP_NOFS);
	if (!copy)
		return htonl(NFS4ERR_SERVERFAULT);

	spin_lock(&cps->clp->cl_lock);
	rcu_read_lock();
	list_for_each_entry_rcu(server, &cps->clp->cl_superblocks,
				client_link) {
		list_for_each_entry(tmp_copy, &server->ss_copies, copies) {
			if (memcmp(args->coa_stateid.other,
					tmp_copy->stateid.other,
					sizeof(args->coa_stateid.other)))
				continue;
			nfs4_copy_cb_args(tmp_copy, args);
			complete(&tmp_copy->completion);
			found = true;
			goto out;
		}
	}
out:
	rcu_read_unlock();
	if (!found) {
		memcpy(&copy->stateid, &args->coa_stateid, NFS4_STATEID_SIZE);
		nfs4_copy_cb_args(copy, args);
		list_add_tail(&copy->copies, &cps->clp->pending_cb_stateids);
	} else
		kfree(copy);
	spin_unlock(&cps->clp->cl_lock);

	return 0;
}
#endif /* CONFIG_NFS_V4_2 */