/*
 * Copyright (c) 2001 The Regents of the University of Michigan.
 * All rights reserved.
 *
 * Kendrick Smith <kmsmith@umich.edu>
 * Andy Adamson <andros@umich.edu>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/xprt.h>
#include <linux/sunrpc/svc_xprt.h>
#include <linux/slab.h>
#include "nfsd.h"
#include "state.h"
#include "netns.h"
#include "trace.h"
#include "xdr4cb.h"
#include "xdr4.h"

#define NFSDDBG_FACILITY	NFSDDBG_PROC

static void nfsd4_mark_cb_fault(struct nfs4_client *, int reason);

#define NFSPROC4_CB_NULL 0
#define NFSPROC4_CB_COMPOUND 1

/* Index of predefined Linux callback client operations */

struct nfs4_cb_compound_hdr {
	/* args */
	u32		ident;	/* minorversion 0 only */
	u32		nops;
	__be32		*nops_p;
	u32		minorversion;
	/* res */
	int		status;
};

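/*
 * Emit a zero length word, used both as an empty XDR array count and as
 * an empty opaque (for example, the zero-length compound tag).
 */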
static __be32 *xdr_encode_empty_array(__be32 *p)
{
	*p++ = xdr_zero;
	return p;
}

/*
 * Encode/decode NFSv4 CB basic data types
 *
 * Basic NFSv4 callback data types are defined in section 15 of RFC
 * 3530: "Network File System (NFS) version 4 Protocol" and section
 * 20 of RFC 5661: "Network File System (NFS) Version 4 Minor Version
 * 1 Protocol"
 */

/*
 * nfs_cb_opnum4
 *
 *	enum nfs_cb_opnum4 {
 *		OP_CB_GETATTR		= 3,
 *		...
 *	};
 */
enum nfs_cb_opnum4 {
	OP_CB_GETATTR			= 3,
	OP_CB_RECALL			= 4,
	OP_CB_LAYOUTRECALL		= 5,
	OP_CB_NOTIFY			= 6,
	OP_CB_PUSH_DELEG		= 7,
	OP_CB_RECALL_ANY		= 8,
	OP_CB_RECALLABLE_OBJ_AVAIL	= 9,
	OP_CB_RECALL_SLOT		= 10,
	OP_CB_SEQUENCE			= 11,
	OP_CB_WANTS_CANCELLED		= 12,
	OP_CB_NOTIFY_LOCK		= 13,
	OP_CB_NOTIFY_DEVICEID		= 14,
	OP_CB_OFFLOAD			= 15,
	OP_CB_ILLEGAL			= 10044
};

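/*
 * Encode the operation number for the next argop of a CB_COMPOUND
 * request as a single XDR word.
 */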
static void encode_nfs_cb_opnum4(struct xdr_stream *xdr, enum nfs_cb_opnum4 op)
{
	__be32 *p;

	p = xdr_reserve_space(xdr, 4);
	*p = cpu_to_be32(op);
}

/*
 * nfs_fh4
 *
 *	typedef opaque nfs_fh4<NFS4_FHSIZE>;
 */
static void encode_nfs_fh4(struct xdr_stream *xdr, const struct knfsd_fh *fh)
{
	u32 length = fh->fh_size;
	__be32 *p;

	BUG_ON(length > NFS4_FHSIZE);
	p = xdr_reserve_space(xdr, 4 + length);
	xdr_encode_opaque(p, &fh->fh_base, length);
}

/*
 * stateid4
 *
 *	struct stateid4 {
 *		uint32_t	seqid;
 *		opaque		other[12];
 *	};
 */
static void encode_stateid4(struct xdr_stream *xdr, const stateid_t *sid)
{
	__be32 *p;

	p = xdr_reserve_space(xdr, NFS4_STATEID_SIZE);
	*p++ = cpu_to_be32(sid->si_generation);
	xdr_encode_opaque_fixed(p, &sid->si_opaque, NFS4_STATEID_OTHER_SIZE);
}

/*
 * sessionid4
 *
 *	typedef opaque sessionid4[NFS4_SESSIONID_SIZE];
 */
static void encode_sessionid4(struct xdr_stream *xdr,
			      const struct nfsd4_session *session)
{
	__be32 *p;

	p = xdr_reserve_space(xdr, NFS4_MAX_SESSIONID_LEN);
	xdr_encode_opaque_fixed(p, session->se_sessionid.data,
				NFS4_MAX_SESSIONID_LEN);
}

/*
 * nfsstat4
 */
static const struct {
	int stat;
	int errno;
} nfs_cb_errtbl[] = {
	{ NFS4_OK,		0		},
	{ NFS4ERR_PERM,		-EPERM		},
	{ NFS4ERR_NOENT,	-ENOENT		},
	{ NFS4ERR_IO,		-EIO		},
	{ NFS4ERR_NXIO,		-ENXIO		},
	{ NFS4ERR_ACCESS,	-EACCES		},
	{ NFS4ERR_EXIST,	-EEXIST		},
	{ NFS4ERR_XDEV,		-EXDEV		},
	{ NFS4ERR_NOTDIR,	-ENOTDIR	},
	{ NFS4ERR_ISDIR,	-EISDIR		},
	{ NFS4ERR_INVAL,	-EINVAL		},
	{ NFS4ERR_FBIG,		-EFBIG		},
	{ NFS4ERR_NOSPC,	-ENOSPC		},
	{ NFS4ERR_ROFS,		-EROFS		},
	{ NFS4ERR_MLINK,	-EMLINK		},
	{ NFS4ERR_NAMETOOLONG,	-ENAMETOOLONG	},
	{ NFS4ERR_NOTEMPTY,	-ENOTEMPTY	},
	{ NFS4ERR_DQUOT,	-EDQUOT		},
	{ NFS4ERR_STALE,	-ESTALE		},
	{ NFS4ERR_BADHANDLE,	-EBADHANDLE	},
	{ NFS4ERR_BAD_COOKIE,	-EBADCOOKIE	},
	{ NFS4ERR_NOTSUPP,	-ENOTSUPP	},
	{ NFS4ERR_TOOSMALL,	-ETOOSMALL	},
	{ NFS4ERR_SERVERFAULT,	-ESERVERFAULT	},
	{ NFS4ERR_BADTYPE,	-EBADTYPE	},
	{ NFS4ERR_LOCKED,	-EAGAIN		},
	{ NFS4ERR_RESOURCE,	-EREMOTEIO	},
	{ NFS4ERR_SYMLINK,	-ELOOP		},
	{ NFS4ERR_OP_ILLEGAL,	-EOPNOTSUPP	},
	{ NFS4ERR_DEADLOCK,	-EDEADLK	},
	{ -1,			-EIO		}
};

/*
 * If we cannot translate the error, the recovery routines should
 * handle it.
 *
 * Note: remaining NFSv4 error codes have values > 10000, so should
 * not conflict with native Linux error codes.
 */
static int nfs_cb_stat_to_errno(int status)
{
	int i;

	for (i = 0; nfs_cb_errtbl[i].stat != -1; i++) {
		if (nfs_cb_errtbl[i].stat == status)
			return nfs_cb_errtbl[i].errno;
	}

	dprintk("NFSD: Unrecognized NFS CB status value: %u\n", status);
	return -status;
}

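/*
 * Decode an nfs_cb_resop4 header: verify that the client replied with
 * the operation we sent, and translate its nfsstat4 into a local errno
 * returned via *status.  An XDR decoding error or an unexpected
 * operation number is reported as -EIO.
 */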
static int decode_cb_op_status(struct xdr_stream *xdr,
			       enum nfs_cb_opnum4 expected, int *status)
{
	__be32 *p;
	u32 op;

	p = xdr_inline_decode(xdr, 4 + 4);
	if (unlikely(p == NULL))
		goto out_overflow;
	op = be32_to_cpup(p++);
	if (unlikely(op != expected))
		goto out_unexpected;
	*status = nfs_cb_stat_to_errno(be32_to_cpup(p));
	return 0;
out_overflow:
	return -EIO;
out_unexpected:
	dprintk("NFSD: Callback server returned operation %d but "
		"we issued a request for %d\n", op, expected);
	return -EIO;
}

/*
 * CB_COMPOUND4args
 *
 *	struct CB_COMPOUND4args {
 *		utf8str_cs	tag;
 *		uint32_t	minorversion;
 *		uint32_t	callback_ident;
 *		nfs_cb_argop4	argarray<>;
 *	};
 */
static void encode_cb_compound4args(struct xdr_stream *xdr,
				    struct nfs4_cb_compound_hdr *hdr)
{
	__be32 *p;

	p = xdr_reserve_space(xdr, 4 + 4 + 4 + 4);
	p = xdr_encode_empty_array(p);		/* empty tag */
	*p++ = cpu_to_be32(hdr->minorversion);
	*p++ = cpu_to_be32(hdr->ident);

	hdr->nops_p = p;
	*p = cpu_to_be32(hdr->nops);		/* argarray element count */
}

/*
 * Update argarray element count
 */
static void encode_cb_nops(struct nfs4_cb_compound_hdr *hdr)
{
	BUG_ON(hdr->nops > NFS4_MAX_BACK_CHANNEL_OPS);
	*hdr->nops_p = cpu_to_be32(hdr->nops);
}

/*
 * CB_COMPOUND4res
 *
 *	struct CB_COMPOUND4res {
 *		nfsstat4	status;
 *		utf8str_cs	tag;
 *		nfs_cb_resop4	resarray<>;
 *	};
 */
static int decode_cb_compound4res(struct xdr_stream *xdr,
				  struct nfs4_cb_compound_hdr *hdr)
{
	u32 length;
	__be32 *p;

	p = xdr_inline_decode(xdr, 4 + 4);
	if (unlikely(p == NULL))
		goto out_overflow;
	hdr->status = be32_to_cpup(p++);
	/* Ignore the tag */
	length = be32_to_cpup(p++);
	p = xdr_inline_decode(xdr, length + 4);
	if (unlikely(p == NULL))
		goto out_overflow;
	p += XDR_QUADLEN(length);
	hdr->nops = be32_to_cpup(p);
	return 0;
out_overflow:
	return -EIO;
}

/*
 * CB_RECALL4args
 *
 *	struct CB_RECALL4args {
 *		stateid4	stateid;
 *		bool		truncate;
 *		nfs_fh4		fh;
 *	};
 */
static void encode_cb_recall4args(struct xdr_stream *xdr,
				  const struct nfs4_delegation *dp,
				  struct nfs4_cb_compound_hdr *hdr)
{
	__be32 *p;

	encode_nfs_cb_opnum4(xdr, OP_CB_RECALL);
	encode_stateid4(xdr, &dp->dl_stid.sc_stateid);

	p = xdr_reserve_space(xdr, 4);
	*p++ = xdr_zero;			/* truncate */

	encode_nfs_fh4(xdr, &dp->dl_stid.sc_file->fi_fhandle);

	hdr->nops++;
}

/*
 * CB_SEQUENCE4args
 *
 *	struct CB_SEQUENCE4args {
 *		sessionid4		csa_sessionid;
 *		sequenceid4		csa_sequenceid;
 *		slotid4			csa_slotid;
 *		slotid4			csa_highest_slotid;
 *		bool			csa_cachethis;
 *		referring_call_list4	csa_referring_call_lists<>;
 *	};
 */
static void encode_cb_sequence4args(struct xdr_stream *xdr,
				    const struct nfsd4_callback *cb,
				    struct nfs4_cb_compound_hdr *hdr)
{
	struct nfsd4_session *session = cb->cb_clp->cl_cb_session;
	__be32 *p;

	if (hdr->minorversion == 0)
		return;

	encode_nfs_cb_opnum4(xdr, OP_CB_SEQUENCE);
	encode_sessionid4(xdr, session);

	p = xdr_reserve_space(xdr, 4 + 4 + 4 + 4 + 4);
	*p++ = cpu_to_be32(session->se_cb_seq_nr);	/* csa_sequenceid */
	*p++ = xdr_zero;			/* csa_slotid */
	*p++ = xdr_zero;			/* csa_highest_slotid */
	*p++ = xdr_zero;			/* csa_cachethis */
	xdr_encode_empty_array(p);		/* csa_referring_call_lists */

	hdr->nops++;
}

/*
 * CB_SEQUENCE4resok
 *
 *	struct CB_SEQUENCE4resok {
 *		sessionid4	csr_sessionid;
 *		sequenceid4	csr_sequenceid;
 *		slotid4		csr_slotid;
 *		slotid4		csr_highest_slotid;
 *		slotid4		csr_target_highest_slotid;
 *	};
 *
 *	union CB_SEQUENCE4res switch (nfsstat4 csr_status) {
 *	case NFS4_OK:
 *		CB_SEQUENCE4resok	csr_resok4;
 *	default:
 *		void;
 *	};
 *
 * Our current back channel implementation supports a single backchannel
 * with a single slot.
 */
static int decode_cb_sequence4resok(struct xdr_stream *xdr,
				    struct nfsd4_callback *cb)
{
	struct nfsd4_session *session = cb->cb_clp->cl_cb_session;
	int status = -ESERVERFAULT;
	__be32 *p;
	u32 dummy;

	/*
	 * If the server returns different values for sessionID, slotID or
	 * sequence number, the server is looney tunes.
	 */
	p = xdr_inline_decode(xdr, NFS4_MAX_SESSIONID_LEN + 4 + 4 + 4 + 4);
	if (unlikely(p == NULL))
		goto out_overflow;

	if (memcmp(p, session->se_sessionid.data, NFS4_MAX_SESSIONID_LEN)) {
		dprintk("NFS: %s Invalid session id\n", __func__);
		goto out;
	}
	p += XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN);

	dummy = be32_to_cpup(p++);
	if (dummy != session->se_cb_seq_nr) {
		dprintk("NFS: %s Invalid sequence number\n", __func__);
		goto out;
	}

	dummy = be32_to_cpup(p++);
	if (dummy != 0) {
		dprintk("NFS: %s Invalid slotid\n", __func__);
		goto out;
	}

	/*
	 * FIXME: process highest slotid and target highest slotid
	 */
	status = 0;
out:
	cb->cb_seq_status = status;
	return status;
out_overflow:
	status = -EIO;
	goto out;
}

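/*
 * NFSv4.0 backchannel compounds carry no CB_SEQUENCE, so there is
 * nothing to decode for minorversion 0 clients.
 */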
static int decode_cb_sequence4res(struct xdr_stream *xdr,
				  struct nfsd4_callback *cb)
{
	int status;

	if (cb->cb_clp->cl_minorversion == 0)
		return 0;

	status = decode_cb_op_status(xdr, OP_CB_SEQUENCE, &cb->cb_seq_status);
	if (unlikely(status || cb->cb_seq_status))
		return status;

	return decode_cb_sequence4resok(xdr, cb);
}

/*
 * NFSv4.0 and NFSv4.1 XDR encode functions
 *
 * NFSv4.0 callback argument types are defined in section 15 of RFC
 * 3530: "Network File System (NFS) version 4 Protocol" and section 20
 * of RFC 5661: "Network File System (NFS) Version 4 Minor Version 1
 * Protocol".
 */

/*
 * NB: Without this zero space reservation, callbacks over krb5p fail
 */
static void nfs4_xdr_enc_cb_null(struct rpc_rqst *req, struct xdr_stream *xdr,
				 const void *__unused)
{
	xdr_reserve_space(xdr, 0);
}

/*
 * 20.2. Operation 4: CB_RECALL - Recall a Delegation
 */
static void nfs4_xdr_enc_cb_recall(struct rpc_rqst *req, struct xdr_stream *xdr,
				   const void *data)
{
	const struct nfsd4_callback *cb = data;
	const struct nfs4_delegation *dp = cb_to_delegation(cb);
	struct nfs4_cb_compound_hdr hdr = {
		.ident = cb->cb_clp->cl_cb_ident,
		.minorversion = cb->cb_clp->cl_minorversion,
	};

	encode_cb_compound4args(xdr, &hdr);
	encode_cb_sequence4args(xdr, cb, &hdr);
	encode_cb_recall4args(xdr, dp, &hdr);
	encode_cb_nops(&hdr);
}


/*
 * NFSv4.0 and NFSv4.1 XDR decode functions
 *
 * NFSv4.0 callback result types are defined in section 15 of RFC
 * 3530: "Network File System (NFS) version 4 Protocol" and section 20
 * of RFC 5661: "Network File System (NFS) Version 4 Minor Version 1
 * Protocol".
 */

static int nfs4_xdr_dec_cb_null(struct rpc_rqst *req, struct xdr_stream *xdr,
				void *__unused)
{
	return 0;
}

/*
 * 20.2. Operation 4: CB_RECALL - Recall a Delegation
 */
static int nfs4_xdr_dec_cb_recall(struct rpc_rqst *rqstp,
				  struct xdr_stream *xdr,
				  void *data)
{
	struct nfsd4_callback *cb = data;
	struct nfs4_cb_compound_hdr hdr;
	int status;

	status = decode_cb_compound4res(xdr, &hdr);
	if (unlikely(status))
		return status;

	status = decode_cb_sequence4res(xdr, cb);
	if (unlikely(status || cb->cb_seq_status))
		return status;

	return decode_cb_op_status(xdr, OP_CB_RECALL, &cb->cb_status);
}

#ifdef CONFIG_NFSD_PNFS
/*
 * CB_LAYOUTRECALL4args
 *
 *	struct layoutrecall_file4 {
 *		nfs_fh4		lor_fh;
 *		offset4		lor_offset;
 *		length4		lor_length;
 *		stateid4	lor_stateid;
 *	};
 *
 *	union layoutrecall4 switch(layoutrecall_type4 lor_recalltype) {
 *	case LAYOUTRECALL4_FILE:
 *		layoutrecall_file4 lor_layout;
 *	case LAYOUTRECALL4_FSID:
 *		fsid4 lor_fsid;
 *	case LAYOUTRECALL4_ALL:
 *		void;
 *	};
 *
 *	struct CB_LAYOUTRECALL4args {
 *		layouttype4		clora_type;
 *		layoutiomode4		clora_iomode;
 *		bool			clora_changed;
 *		layoutrecall4		clora_recall;
 *	};
 */
static void encode_cb_layout4args(struct xdr_stream *xdr,
				  const struct nfs4_layout_stateid *ls,
				  struct nfs4_cb_compound_hdr *hdr)
{
	__be32 *p;

	BUG_ON(hdr->minorversion == 0);

	p = xdr_reserve_space(xdr, 5 * 4);
	*p++ = cpu_to_be32(OP_CB_LAYOUTRECALL);
	*p++ = cpu_to_be32(ls->ls_layout_type);
	*p++ = cpu_to_be32(IOMODE_ANY);
	*p++ = cpu_to_be32(1);
	*p = cpu_to_be32(RETURN_FILE);

	encode_nfs_fh4(xdr, &ls->ls_stid.sc_file->fi_fhandle);

	p = xdr_reserve_space(xdr, 2 * 8);
	p = xdr_encode_hyper(p, 0);
	xdr_encode_hyper(p, NFS4_MAX_UINT64);

	encode_stateid4(xdr, &ls->ls_recall_sid);

	hdr->nops++;
}

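/*
 * Encode a CB_COMPOUND request that recalls the layout described by
 * the nfs4_layout_stateid embedding this callback.
 */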
static void nfs4_xdr_enc_cb_layout(struct rpc_rqst *req,
				   struct xdr_stream *xdr,
				   const void *data)
{
	const struct nfsd4_callback *cb = data;
	const struct nfs4_layout_stateid *ls =
		container_of(cb, struct nfs4_layout_stateid, ls_recall);
	struct nfs4_cb_compound_hdr hdr = {
		.ident = 0,
		.minorversion = cb->cb_clp->cl_minorversion,
	};

	encode_cb_compound4args(xdr, &hdr);
	encode_cb_sequence4args(xdr, cb, &hdr);
	encode_cb_layout4args(xdr, ls, &hdr);
	encode_cb_nops(&hdr);
}

static int nfs4_xdr_dec_cb_layout(struct rpc_rqst *rqstp,
				  struct xdr_stream *xdr,
				  void *data)
{
	struct nfsd4_callback *cb = data;
	struct nfs4_cb_compound_hdr hdr;
	int status;

	status = decode_cb_compound4res(xdr, &hdr);
	if (unlikely(status))
		return status;

	status = decode_cb_sequence4res(xdr, cb);
	if (unlikely(status || cb->cb_seq_status))
		return status;

	return decode_cb_op_status(xdr, OP_CB_LAYOUTRECALL, &cb->cb_status);
}
#endif /* CONFIG_NFSD_PNFS */

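/*
 * Encode a state_owner4: the 8-byte clientid followed by the
 * variable-length owner opaque.
 */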
static void encode_stateowner(struct xdr_stream *xdr, struct nfs4_stateowner *so)
{
	__be32 *p;

	p = xdr_reserve_space(xdr, 8 + 4 + so->so_owner.len);
	p = xdr_encode_opaque_fixed(p, &so->so_client->cl_clientid, 8);
	xdr_encode_opaque(p, so->so_owner.data, so->so_owner.len);
}

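/*
 * CB_NOTIFY_LOCK arguments are just the file handle and the lock owner
 * of the blocked lock.  This callback exists only on the NFSv4.1+
 * backchannel, hence the BUG_ON for minorversion 0.
 */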
static void nfs4_xdr_enc_cb_notify_lock(struct rpc_rqst *req,
					struct xdr_stream *xdr,
					const void *data)
{
	const struct nfsd4_callback *cb = data;
	const struct nfsd4_blocked_lock *nbl =
		container_of(cb, struct nfsd4_blocked_lock, nbl_cb);
	struct nfs4_lockowner *lo = (struct nfs4_lockowner *)nbl->nbl_lock.fl_owner;
	struct nfs4_cb_compound_hdr hdr = {
		.ident = 0,
		.minorversion = cb->cb_clp->cl_minorversion,
	};

	__be32 *p;

	BUG_ON(hdr.minorversion == 0);

	encode_cb_compound4args(xdr, &hdr);
	encode_cb_sequence4args(xdr, cb, &hdr);

	p = xdr_reserve_space(xdr, 4);
	*p = cpu_to_be32(OP_CB_NOTIFY_LOCK);
	encode_nfs_fh4(xdr, &nbl->nbl_fh);
	encode_stateowner(xdr, &lo->lo_owner);
	hdr.nops++;

	encode_cb_nops(&hdr);
}

static int nfs4_xdr_dec_cb_notify_lock(struct rpc_rqst *rqstp,
				       struct xdr_stream *xdr,
				       void *data)
{
	struct nfsd4_callback *cb = data;
	struct nfs4_cb_compound_hdr hdr;
	int status;

	status = decode_cb_compound4res(xdr, &hdr);
	if (unlikely(status))
		return status;

	status = decode_cb_sequence4res(xdr, cb);
	if (unlikely(status || cb->cb_seq_status))
		return status;

	return decode_cb_op_status(xdr, OP_CB_NOTIFY_LOCK, &cb->cb_status);
}

/*
 * struct write_response4 {
 *	stateid4	wr_callback_id<1>;
 *	length4		wr_count;
 *	stable_how4	wr_committed;
 *	verifier4	wr_writeverf;
 * };
 * union offload_info4 switch (nfsstat4 coa_status) {
 * case NFS4_OK:
 *	write_response4	coa_resok4;
 * default:
 *	length4		coa_bytes_copied;
 * };
 * struct CB_OFFLOAD4args {
 *	nfs_fh4		coa_fh;
 *	stateid4	coa_stateid;
 *	offload_info4	coa_offload_info;
 * };
 */
static void encode_offload_info4(struct xdr_stream *xdr,
				 __be32 nfserr,
				 const struct nfsd4_copy *cp)
{
	__be32 *p;

	p = xdr_reserve_space(xdr, 4);
	*p++ = nfserr;
	if (!nfserr) {
		p = xdr_reserve_space(xdr, 4 + 8 + 4 + NFS4_VERIFIER_SIZE);
		p = xdr_encode_empty_array(p);
		p = xdr_encode_hyper(p, cp->cp_res.wr_bytes_written);
		*p++ = cpu_to_be32(cp->cp_res.wr_stable_how);
		p = xdr_encode_opaque_fixed(p, cp->cp_res.wr_verifier.data,
					    NFS4_VERIFIER_SIZE);
	} else {
		p = xdr_reserve_space(xdr, 8);
		/* We always return success if bytes were written */
		p = xdr_encode_hyper(p, 0);
	}
}

static void encode_cb_offload4args(struct xdr_stream *xdr,
				   __be32 nfserr,
				   const struct knfsd_fh *fh,
				   const struct nfsd4_copy *cp,
				   struct nfs4_cb_compound_hdr *hdr)
{
	__be32 *p;

	p = xdr_reserve_space(xdr, 4);
	*p++ = cpu_to_be32(OP_CB_OFFLOAD);
	encode_nfs_fh4(xdr, fh);
	encode_stateid4(xdr, &cp->cp_res.cb_stateid);
	encode_offload_info4(xdr, nfserr, cp);

	hdr->nops++;
}

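/*
 * Encode a CB_COMPOUND request notifying the client that the offloaded
 * (asynchronous) copy identified by cb_stateid has completed.
 */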
static void nfs4_xdr_enc_cb_offload(struct rpc_rqst *req,
				    struct xdr_stream *xdr,
				    const void *data)
{
	const struct nfsd4_callback *cb = data;
	const struct nfsd4_copy *cp =
		container_of(cb, struct nfsd4_copy, cp_cb);
	struct nfs4_cb_compound_hdr hdr = {
		.ident = 0,
		.minorversion = cb->cb_clp->cl_minorversion,
	};

	encode_cb_compound4args(xdr, &hdr);
	encode_cb_sequence4args(xdr, cb, &hdr);
	encode_cb_offload4args(xdr, cp->nfserr, &cp->fh, cp, &hdr);
	encode_cb_nops(&hdr);
}

static int nfs4_xdr_dec_cb_offload(struct rpc_rqst *rqstp,
				   struct xdr_stream *xdr,
				   void *data)
{
	struct nfsd4_callback *cb = data;
	struct nfs4_cb_compound_hdr hdr;
	int status;

	status = decode_cb_compound4res(xdr, &hdr);
	if (unlikely(status))
		return status;

	status = decode_cb_sequence4res(xdr, cb);
	if (unlikely(status || cb->cb_seq_status))
		return status;

	return decode_cb_op_status(xdr, OP_CB_OFFLOAD, &cb->cb_status);
}

/*
 * RPC procedure tables
 */
#define PROC(proc, call, argtype, restype)				\
[NFSPROC4_CLNT_##proc] = {						\
	.p_proc		= NFSPROC4_CB_##call,				\
	.p_encode	= nfs4_xdr_enc_##argtype,			\
	.p_decode	= nfs4_xdr_dec_##restype,			\
	.p_arglen	= NFS4_enc_##argtype##_sz,			\
	.p_replen	= NFS4_dec_##restype##_sz,			\
	.p_statidx	= NFSPROC4_CB_##call,				\
	.p_name		= #proc,					\
}

static const struct rpc_procinfo nfs4_cb_procedures[] = {
	PROC(CB_NULL,		NULL,		cb_null,	cb_null),
	PROC(CB_RECALL,		COMPOUND,	cb_recall,	cb_recall),
#ifdef CONFIG_NFSD_PNFS
	PROC(CB_LAYOUT,		COMPOUND,	cb_layout,	cb_layout),
#endif
	PROC(CB_NOTIFY_LOCK,	COMPOUND,	cb_notify_lock,	cb_notify_lock),
	PROC(CB_OFFLOAD,	COMPOUND,	cb_offload,	cb_offload),
};

static unsigned int nfs4_cb_counts[ARRAY_SIZE(nfs4_cb_procedures)];
static const struct rpc_version nfs_cb_version4 = {
/*
 * Note on the callback rpc program version number: despite language in rfc
 * 5661 section 18.36.3 requiring servers to use 4 in this field, the
 * official xdr descriptions for both 4.0 and 4.1 specify version 1, and
 * in practice that appears to be what implementations use.  The section
 * 18.36.3 language is expected to be fixed in an erratum.
 */
	.number			= 1,
	.nrprocs		= ARRAY_SIZE(nfs4_cb_procedures),
	.procs			= nfs4_cb_procedures,
	.counts			= nfs4_cb_counts,
};

static const struct rpc_version *nfs_cb_version[2] = {
	[1] = &nfs_cb_version4,
};

static const struct rpc_program cb_program;

static struct rpc_stat cb_stats = {
	.program		= &cb_program
};

#define NFS4_CALLBACK 0x40000000
static const struct rpc_program cb_program = {
	.name			= "nfs4_cb",
	.number			= NFS4_CALLBACK,
	.nrvers			= ARRAY_SIZE(nfs_cb_version),
	.version		= nfs_cb_version,
	.stats			= &cb_stats,
	.pipe_dir_name		= "nfsd4_cb",
};

^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) static int max_cb_time(struct net *net)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) struct nfsd_net *nn = net_generic(net, nfsd_net_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) * nfsd4_lease is set to at most one hour in __nfsd4_write_time,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) * so we can use 32-bit math on it. Warn if that assumption
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) * ever stops being true.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) if (WARN_ON_ONCE(nn->nfsd4_lease > 3600))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) return 360 * HZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) return max(((u32)nn->nfsd4_lease)/10, 1u) * HZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) }
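
/*
 * Worked example (a sketch, assuming the commonly used 90-second lease
 * default): max_cb_time() returns max(90 / 10, 1) * HZ == 9 * HZ, so each
 * callback RPC gets a single 9-second timeout with no retries
 * (to_retries == 0), comfortably shorter than the lease the client is
 * renewing against.
 */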
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) static struct workqueue_struct *callback_wq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) static bool nfsd4_queue_cb(struct nfsd4_callback *cb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) return queue_work(callback_wq, &cb->cb_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) static void nfsd41_cb_inflight_begin(struct nfs4_client *clp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) atomic_inc(&clp->cl_cb_inflight);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) static void nfsd41_cb_inflight_end(struct nfs4_client *clp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) if (atomic_dec_and_test(&clp->cl_cb_inflight))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) wake_up_var(&clp->cl_cb_inflight);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) static void nfsd41_cb_inflight_wait_complete(struct nfs4_client *clp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) wait_var_event(&clp->cl_cb_inflight,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) !atomic_read(&clp->cl_cb_inflight));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) }
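
/*
 * Sketch of how the inflight counter is used within this file: each
 * callback takes a reference before it is queued and drops it from the
 * RPC release path, so nfsd4_shutdown_callback() can wait for stragglers
 * that outlive the work item itself:
 *
 *	nfsd4_run_cb(cb)
 *		nfsd41_cb_inflight_begin(clp);	// before nfsd4_queue_cb()
 *	nfsd4_cb_probe_release() / nfsd41_destroy_cb()
 *		nfsd41_cb_inflight_end(clp);	// from rpc_release
 *	nfsd4_shutdown_callback()
 *		nfsd41_cb_inflight_wait_complete(clp);
 */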
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) static const struct cred *get_backchannel_cred(struct nfs4_client *clp, struct rpc_clnt *client, struct nfsd4_session *ses)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) if (clp->cl_minorversion == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) client->cl_principal = clp->cl_cred.cr_targ_princ ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) clp->cl_cred.cr_targ_princ : "nfs";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) return get_cred(rpc_machine_cred());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) struct cred *kcred;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) kcred = prepare_kernel_cred(NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) if (!kcred)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) kcred->uid = ses->se_cb_sec.uid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) kcred->gid = ses->se_cb_sec.gid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) return kcred;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) static int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *conn, struct nfsd4_session *ses)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) int maxtime = max_cb_time(clp->net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) struct rpc_timeout timeparms = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) .to_initval = maxtime,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) .to_retries = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) .to_maxval = maxtime,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) struct rpc_create_args args = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) .net = clp->net,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) .address = (struct sockaddr *) &conn->cb_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) .addrsize = conn->cb_addrlen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) .saddress = (struct sockaddr *) &conn->cb_saddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) .timeout = &timeparms,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) .program = &cb_program,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) .version = 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) .flags = (RPC_CLNT_CREATE_NOPING | RPC_CLNT_CREATE_QUIET),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) .cred = current_cred(),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) struct rpc_clnt *client;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) const struct cred *cred;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) if (clp->cl_minorversion == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) if (!clp->cl_cred.cr_principal &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) (clp->cl_cred.cr_flavor >= RPC_AUTH_GSS_KRB5)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) trace_nfsd_cb_setup_err(clp, -EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) args.client_name = clp->cl_cred.cr_principal;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) args.prognumber = conn->cb_prog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) args.protocol = XPRT_TRANSPORT_TCP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) args.authflavor = clp->cl_cred.cr_flavor;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) clp->cl_cb_ident = conn->cb_ident;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) if (!conn->cb_xprt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) trace_nfsd_cb_setup_err(clp, -EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) clp->cl_cb_conn.cb_xprt = conn->cb_xprt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) clp->cl_cb_session = ses;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) args.bc_xprt = conn->cb_xprt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) args.prognumber = clp->cl_cb_session->se_cb_prog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) args.protocol = conn->cb_xprt->xpt_class->xcl_ident |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) XPRT_TRANSPORT_BC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) args.authflavor = ses->se_cb_sec.flavor;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) /* Create RPC client */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) client = rpc_create(&args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) if (IS_ERR(client)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) trace_nfsd_cb_setup_err(clp, PTR_ERR(client));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) return PTR_ERR(client);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) cred = get_backchannel_cred(clp, client, ses);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) if (!cred) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) trace_nfsd_cb_setup_err(clp, -ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) rpc_shutdown_client(client);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) clp->cl_cb_client = client;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) clp->cl_cb_cred = cred;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) trace_nfsd_cb_setup(clp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) }
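
/*
 * Summary sketch of the two setup paths above:
 *
 *	NFSv4.0: a new TCP connection is created to the callback address and
 *	    program the client registered (conn->cb_addr, cb_prog, cb_ident,
 *	    typically from SETCLIENTID), reusing the client's own auth flavor
 *	    and principal.
 *	NFSv4.1+: no new connection; the rpc_clnt is bound to the client's
 *	    existing transport (args.bc_xprt = conn->cb_xprt) and uses the
 *	    backchannel security parameters of the session (se_cb_sec).
 */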
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) static void nfsd4_mark_cb_down(struct nfs4_client *clp, int reason)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) if (test_bit(NFSD4_CLIENT_CB_UPDATE, &clp->cl_flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) clp->cl_cb_state = NFSD4_CB_DOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) trace_nfsd_cb_state(clp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) static void nfsd4_mark_cb_fault(struct nfs4_client *clp, int reason)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) if (test_bit(NFSD4_CLIENT_CB_UPDATE, &clp->cl_flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) clp->cl_cb_state = NFSD4_CB_FAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) trace_nfsd_cb_state(clp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) static void nfsd4_cb_probe_done(struct rpc_task *task, void *calldata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) struct nfs4_client *clp = container_of(calldata, struct nfs4_client, cl_cb_null);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) trace_nfsd_cb_done(clp, task->tk_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) if (task->tk_status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) nfsd4_mark_cb_down(clp, task->tk_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) clp->cl_cb_state = NFSD4_CB_UP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) trace_nfsd_cb_state(clp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) static void nfsd4_cb_probe_release(void *calldata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) struct nfs4_client *clp = container_of(calldata, struct nfs4_client, cl_cb_null);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) nfsd41_cb_inflight_end(clp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) static const struct rpc_call_ops nfsd4_cb_probe_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) /* XXX: release method to ensure we set the cb channel down if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) * necessary on early failure? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) .rpc_call_done = nfsd4_cb_probe_done,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) .rpc_release = nfsd4_cb_probe_release,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) * Poke the callback thread to process any updates to the callback
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) * parameters, and send a null probe.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) void nfsd4_probe_callback(struct nfs4_client *clp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) clp->cl_cb_state = NFSD4_CB_UNKNOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) trace_nfsd_cb_state(clp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) set_bit(NFSD4_CLIENT_CB_UPDATE, &clp->cl_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) nfsd4_run_cb(&clp->cl_cb_null);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) void nfsd4_probe_callback_sync(struct nfs4_client *clp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) nfsd4_probe_callback(clp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) flush_workqueue(callback_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) void nfsd4_change_callback(struct nfs4_client *clp, struct nfs4_cb_conn *conn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) clp->cl_cb_state = NFSD4_CB_UNKNOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) spin_lock(&clp->cl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) memcpy(&clp->cl_cb_conn, conn, sizeof(struct nfs4_cb_conn));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) spin_unlock(&clp->cl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) trace_nfsd_cb_state(clp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) }
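
/*
 * Usage sketch (an assumed caller pattern, not copied from the state code):
 * when a client supplies new callback connection information, the caller
 * would typically stash it and then kick a probe:
 *
 *	nfsd4_change_callback(clp, &conn);	// record new cb_addr/cb_prog
 *	nfsd4_probe_callback(clp);		// set CB_UPDATE, queue CB_NULL
 */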
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) * There's currently a single callback channel slot.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) * If the slot is available, then mark it busy. Otherwise, put the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) * calling rpc_task to sleep on the callback RPC wait queue.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) static bool nfsd41_cb_get_slot(struct nfsd4_callback *cb, struct rpc_task *task)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) struct nfs4_client *clp = cb->cb_clp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) if (!cb->cb_holds_slot &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) test_and_set_bit(0, &clp->cl_cb_slot_busy) != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) rpc_sleep_on(&clp->cl_cb_waitq, task, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) /* Race breaker */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) if (test_and_set_bit(0, &clp->cl_cb_slot_busy) != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) dprintk("%s slot is busy\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) rpc_wake_up_queued_task(&clp->cl_cb_waitq, task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) cb->cb_holds_slot = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) static void nfsd41_cb_release_slot(struct nfsd4_callback *cb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) struct nfs4_client *clp = cb->cb_clp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) if (cb->cb_holds_slot) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) cb->cb_holds_slot = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) clear_bit(0, &clp->cl_cb_slot_busy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) rpc_wake_up_next(&clp->cl_cb_waitq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) }
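
/*
 * Sketch of the slot life cycle for a v4.1+ callback (all within this
 * file): the single backchannel slot is claimed in nfsd4_cb_prepare() via
 * nfsd41_cb_get_slot(), and given back either when CB_SEQUENCE processing
 * finishes in nfsd4_cb_sequence_done() or when the callback is torn down
 * in nfsd41_destroy_cb().  The test_and_set_bit()/rpc_sleep_on() pair
 * above is the usual "sleep, then re-test" race breaker, so a wakeup that
 * lands between the two checks is not lost.
 */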
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) static void nfsd41_destroy_cb(struct nfsd4_callback *cb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) struct nfs4_client *clp = cb->cb_clp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) nfsd41_cb_release_slot(cb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) if (cb->cb_ops && cb->cb_ops->release)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) cb->cb_ops->release(cb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) nfsd41_cb_inflight_end(clp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) * TODO: cb_sequence should support referring call lists, cachethis, and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) * multiple slots, and should mark the callback channel down on communication errors.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) static void nfsd4_cb_prepare(struct rpc_task *task, void *calldata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) struct nfsd4_callback *cb = calldata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) struct nfs4_client *clp = cb->cb_clp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) u32 minorversion = clp->cl_minorversion;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) * cb_seq_status is only set in decode_cb_sequence4res,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) * and so will remain 1 if an rpc level failure occurs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) cb->cb_seq_status = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) cb->cb_status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) if (minorversion && !nfsd41_cb_get_slot(cb, task))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) rpc_call_start(task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) }
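
/*
 * Worked example of the cb_seq_status convention set up above:
 *
 *	 1  - initial value; still 1 in nfsd4_cb_sequence_done() means the
 *	      reply never reached decode_cb_sequence4res (an rpc-level
 *	      failure), which is treated as a backchannel fault;
 *	 0  - CB_SEQUENCE succeeded, so the slot sequence id is bumped;
 *	< 0 - an error status from the decoded CB_SEQUENCE result, handled
 *	      case by case in the switch in nfsd4_cb_sequence_done().
 */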
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) static bool nfsd4_cb_sequence_done(struct rpc_task *task, struct nfsd4_callback *cb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) struct nfs4_client *clp = cb->cb_clp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) struct nfsd4_session *session = clp->cl_cb_session;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) bool ret = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) if (!clp->cl_minorversion) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) * If the backchannel connection was shut down while this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) * task was queued, we need to resubmit it after setting up
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) * a new backchannel connection.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) * Note that if we lost our callback connection permanently
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) * the submission code will error out, so we don't need to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) * handle that case here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) if (RPC_SIGNALLED(task))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) goto need_restart;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) if (!cb->cb_holds_slot)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) goto need_restart;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) switch (cb->cb_seq_status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) * No need for lock, access serialized in nfsd4_cb_prepare
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) * RFC5661 20.9.3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) * If CB_SEQUENCE returns an error, then the state of the slot
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) * (sequence ID, cached reply) MUST NOT change.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) ++session->se_cb_seq_nr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) case -ESERVERFAULT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) ++session->se_cb_seq_nr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) case 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) case -NFS4ERR_BADSESSION:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) nfsd4_mark_cb_fault(cb->cb_clp, cb->cb_seq_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) ret = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) case -NFS4ERR_DELAY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) if (!rpc_restart_call(task))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) rpc_delay(task, 2 * HZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) case -NFS4ERR_BADSLOT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) goto retry_nowait;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) case -NFS4ERR_SEQ_MISORDERED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) if (session->se_cb_seq_nr != 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) session->se_cb_seq_nr = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) goto retry_nowait;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) nfsd4_mark_cb_fault(cb->cb_clp, cb->cb_seq_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) dprintk("%s: unprocessed error %d\n", __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) cb->cb_seq_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) nfsd41_cb_release_slot(cb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) dprintk("%s: freed slot, new seqid=%d\n", __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) clp->cl_cb_session->se_cb_seq_nr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) if (RPC_SIGNALLED(task))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) goto need_restart;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) retry_nowait:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) if (rpc_restart_call_prepare(task))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) ret = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) need_restart:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) if (!test_bit(NFSD4_CLIENT_CB_KILL, &clp->cl_flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) task->tk_status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) cb->cb_need_restart = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) static void nfsd4_cb_done(struct rpc_task *task, void *calldata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) struct nfsd4_callback *cb = calldata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) struct nfs4_client *clp = cb->cb_clp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) trace_nfsd_cb_done(clp, task->tk_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) if (!nfsd4_cb_sequence_done(task, cb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) if (cb->cb_status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) WARN_ON_ONCE(task->tk_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) task->tk_status = cb->cb_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) switch (cb->cb_ops->done(cb, task)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) task->tk_status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) rpc_restart_call_prepare(task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) case 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) switch (task->tk_status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) case -EIO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) case -ETIMEDOUT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) case -EACCES:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) nfsd4_mark_cb_down(clp, task->tk_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) BUG();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) }
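
/*
 * Sketch of the ->done() contract relied on above: a callback's
 * nfsd4_callback_ops::done method returns 0 to have the RPC restarted
 * (task->tk_status is cleared and rpc_restart_call_prepare() is called),
 * or 1 to finish, in which case hard errors (-EIO, -ETIMEDOUT, -EACCES)
 * mark the channel down and ->release runs next via nfsd4_cb_release().
 * Any other return value is a bug (BUG() above).
 */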
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) static void nfsd4_cb_release(void *calldata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) struct nfsd4_callback *cb = calldata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) if (cb->cb_need_restart)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) nfsd4_queue_cb(cb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) nfsd41_destroy_cb(cb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) static const struct rpc_call_ops nfsd4_cb_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) .rpc_call_prepare = nfsd4_cb_prepare,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) .rpc_call_done = nfsd4_cb_done,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) .rpc_release = nfsd4_cb_release,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) int nfsd4_create_callback_queue(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) callback_wq = alloc_ordered_workqueue("nfsd4_callbacks", 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) if (!callback_wq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) void nfsd4_destroy_callback_queue(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) destroy_workqueue(callback_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) /* must be called under the state lock */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) void nfsd4_shutdown_callback(struct nfs4_client *clp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) set_bit(NFSD4_CLIENT_CB_KILL, &clp->cl_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) * Note this won't actually result in a null callback;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) * instead, nfsd4_run_cb_work() will detect the killed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) * client, shut down the rpc client, and stop:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) nfsd4_run_cb(&clp->cl_cb_null);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) flush_workqueue(callback_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) nfsd41_cb_inflight_wait_complete(clp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) }
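
/*
 * Sketch of why both waits above are needed: flush_workqueue() only covers
 * nfsd4_run_cb_work() itself; the asynchronous RPC it started can still be
 * running afterwards and only drops its inflight reference from the
 * rpc_release path (nfsd4_cb_probe_release() or nfsd41_destroy_cb()),
 * which is what nfsd41_cb_inflight_wait_complete() waits out.
 */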
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) /* requires cl_lock: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) static struct nfsd4_conn * __nfsd4_find_backchannel(struct nfs4_client *clp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) struct nfsd4_session *s;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) struct nfsd4_conn *c;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) list_for_each_entry(s, &clp->cl_sessions, se_perclnt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) list_for_each_entry(c, &s->se_conns, cn_persession) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) if (c->cn_flags & NFS4_CDFC4_BACK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) return c;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) * Note there isn't a lot of locking in this code; instead we depend on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) * the fact that it is run from the callback_wq, which won't run two
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) * work items at once. So, for example, callback_wq handles all access
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) * of cl_cb_client and all calls to rpc_create or rpc_shutdown_client.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) static void nfsd4_process_cb_update(struct nfsd4_callback *cb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) struct nfs4_cb_conn conn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) struct nfs4_client *clp = cb->cb_clp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) struct nfsd4_session *ses = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) struct nfsd4_conn *c;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) * This is either an update, or the client dying; in either case,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) * kill the old client:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) if (clp->cl_cb_client) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) trace_nfsd_cb_shutdown(clp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) rpc_shutdown_client(clp->cl_cb_client);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) clp->cl_cb_client = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) put_cred(clp->cl_cb_cred);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) clp->cl_cb_cred = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) if (clp->cl_cb_conn.cb_xprt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) svc_xprt_put(clp->cl_cb_conn.cb_xprt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) clp->cl_cb_conn.cb_xprt = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) if (test_bit(NFSD4_CLIENT_CB_KILL, &clp->cl_flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) spin_lock(&clp->cl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) * Only serialized callback code is allowed to clear these
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) * flags; main nfsd code can only set them:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) BUG_ON(!(clp->cl_flags & NFSD4_CLIENT_CB_FLAG_MASK));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) clear_bit(NFSD4_CLIENT_CB_UPDATE, &clp->cl_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) memcpy(&conn, &cb->cb_clp->cl_cb_conn, sizeof(struct nfs4_cb_conn));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) c = __nfsd4_find_backchannel(clp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) if (c) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) svc_xprt_get(c->cn_xprt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) conn.cb_xprt = c->cn_xprt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) ses = c->cn_session;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) spin_unlock(&clp->cl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) err = setup_callback_client(clp, &conn, ses);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) nfsd4_mark_cb_down(clp, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) if (c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) svc_xprt_put(c->cn_xprt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) nfsd4_run_cb_work(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) struct nfsd4_callback *cb =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) container_of(work, struct nfsd4_callback, cb_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) struct nfs4_client *clp = cb->cb_clp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) struct rpc_clnt *clnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) int flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) trace_nfsd_cb_work(clp, cb->cb_msg.rpc_proc->p_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) if (cb->cb_need_restart) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) cb->cb_need_restart = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) if (cb->cb_ops && cb->cb_ops->prepare)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) cb->cb_ops->prepare(cb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) if (clp->cl_flags & NFSD4_CLIENT_CB_FLAG_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) nfsd4_process_cb_update(cb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) clnt = clp->cl_cb_client;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) if (!clnt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) /* Callback channel broken, or client killed; give up: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) nfsd41_destroy_cb(cb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) * Don't send probe messages for 4.1 or later.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) if (!cb->cb_ops && clp->cl_minorversion) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) clp->cl_cb_state = NFSD4_CB_UP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) nfsd41_destroy_cb(cb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) cb->cb_msg.rpc_cred = clp->cl_cb_cred;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) flags = clp->cl_minorversion ? RPC_TASK_NOCONNECT : RPC_TASK_SOFTCONN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) rpc_call_async(clnt, &cb->cb_msg, RPC_TASK_SOFT | flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) cb->cb_ops ? &nfsd4_cb_ops : &nfsd4_cb_probe_ops, cb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) }
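
/*
 * Note on the task flags chosen above (explanatory sketch): for v4.1+ the
 * backchannel must ride the connection the client established, so
 * RPC_TASK_NOCONNECT forbids the server from opening one of its own; for
 * v4.0 the server does connect out to the client's callback address, and
 * RPC_TASK_SOFTCONN turns a failed connection attempt into an error
 * rather than an endless retry.  RPC_TASK_SOFT likewise bounds the call
 * by the timeout set up in setup_callback_client().
 */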
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) void nfsd4_init_cb(struct nfsd4_callback *cb, struct nfs4_client *clp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) const struct nfsd4_callback_ops *ops, enum nfsd4_cb_op op)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) cb->cb_clp = clp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) cb->cb_msg.rpc_proc = &nfs4_cb_procedures[op];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) cb->cb_msg.rpc_argp = cb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) cb->cb_msg.rpc_resp = cb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) cb->cb_ops = ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) INIT_WORK(&cb->cb_work, nfsd4_run_cb_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) cb->cb_seq_status = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) cb->cb_status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) cb->cb_need_restart = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) cb->cb_holds_slot = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) void nfsd4_run_cb(struct nfsd4_callback *cb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) struct nfs4_client *clp = cb->cb_clp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) nfsd41_cb_inflight_begin(clp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) if (!nfsd4_queue_cb(cb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) nfsd41_cb_inflight_end(clp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) }
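
/*
 * Usage sketch (hypothetical caller; the ops table and names below are
 * illustrative, not taken from this file):
 *
 *	static const struct nfsd4_callback_ops example_cb_ops = {
 *		.prepare = ...,		// optional, runs from callback_wq
 *		.done    = ...,		// return 0 to retry, 1 when finished
 *		.release = ...,		// final cleanup, may free the cb
 *	};
 *
 *	nfsd4_init_cb(&cb, clp, &example_cb_ops, op);
 *				// op: an enum nfsd4_cb_op value indexing
 *				// nfs4_cb_procedures[], e.g. the CB_RECALL entry
 *	nfsd4_run_cb(&cb);	// queues nfsd4_run_cb_work() on callback_wq
 */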