// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2015 Oracle. All rights reserved.
 *
 * Support for backward direction RPCs on RPC/RDMA.
 */

#include <linux/sunrpc/xprt.h>
#include <linux/sunrpc/svc.h>
#include <linux/sunrpc/svc_xprt.h>
#include <linux/sunrpc/svc_rdma.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

#undef RPCRDMA_BACKCHANNEL_DEBUG

/**
 * xprt_rdma_bc_setup - Pre-allocate resources for handling backchannel requests
 * @xprt: transport associated with these backchannel resources
 * @reqs: number of concurrent incoming requests to expect
 *
 * Returns 0 on success; otherwise a negative errno
 */
int xprt_rdma_bc_setup(struct rpc_xprt *xprt, unsigned int reqs)
{
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);

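	/* Note: @reqs is advisory only. The transport always provisions
	 * a fixed number of backchannel credits, independent of the
	 * caller's request.
	 */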
	r_xprt->rx_buf.rb_bc_srv_max_requests = RPCRDMA_BACKWARD_WRS >> 1;
	trace_xprtrdma_cb_setup(r_xprt, reqs);
	return 0;
}

/**
 * xprt_rdma_bc_maxpayload - Return maximum backchannel message size
 * @xprt: transport
 *
 * Returns maximum size, in bytes, of a backchannel message
 */
size_t xprt_rdma_bc_maxpayload(struct rpc_xprt *xprt)
{
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	struct rpcrdma_ep *ep = r_xprt->rx_ep;
	size_t maxmsg;

	maxmsg = min_t(unsigned int, ep->re_inline_send, ep->re_inline_recv);
	maxmsg = min_t(unsigned int, maxmsg, PAGE_SIZE);
	return maxmsg - RPCRDMA_HDRLEN_MIN;
}

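/**
 * xprt_rdma_bc_max_slots - Return maximum number of backchannel slots
 * @xprt: transport
 *
 * Returns the number of concurrent backchannel requests this transport
 * supports: half of the backward-direction work request allocation.
 */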
unsigned int xprt_rdma_bc_max_slots(struct rpc_xprt *xprt)
{
	return RPCRDMA_BACKWARD_WRS >> 1;
}

static int rpcrdma_bc_marshal_reply(struct rpc_rqst *rqst)
{
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(rqst->rq_xprt);
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
	__be32 *p;

	rpcrdma_set_xdrlen(&req->rl_hdrbuf, 0);
	xdr_init_encode(&req->rl_stream, &req->rl_hdrbuf,
			rdmab_data(req->rl_rdmabuf), rqst);

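	/* Reserve seven XDR words (28 bytes) for the fixed transport
	 * header: XID, version, credits, RDMA_MSG, and three empty
	 * chunk lists.
	 */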
	p = xdr_reserve_space(&req->rl_stream, 28);
	if (unlikely(!p))
		return -EIO;
	*p++ = rqst->rq_xid;
	*p++ = rpcrdma_version;
	*p++ = cpu_to_be32(r_xprt->rx_buf.rb_bc_srv_max_requests);
	*p++ = rdma_msg;
	*p++ = xdr_zero;
	*p++ = xdr_zero;
	*p = xdr_zero;

	if (rpcrdma_prepare_send_sges(r_xprt, req, RPCRDMA_HDRLEN_MIN,
				      &rqst->rq_snd_buf, rpcrdma_noch_pullup))
		return -EIO;

	trace_xprtrdma_cb_reply(rqst);
	return 0;
}

/**
 * xprt_rdma_bc_send_reply - marshal and send a backchannel reply
 * @rqst: RPC rqst with a backchannel RPC reply in rq_snd_buf
 *
 * Caller holds the transport's write lock.
 *
 * Returns:
 *	%0 if the RPC message has been sent
 *	%-ENOTCONN if the caller should reconnect and call again
 *	%-EIO if a permanent error occurred and the request was not
 *		sent. Do not try to send this message again.
 */
int xprt_rdma_bc_send_reply(struct rpc_rqst *rqst)
{
	struct rpc_xprt *xprt = rqst->rq_xprt;
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
	int rc;

	if (!xprt_connected(xprt))
		return -ENOTCONN;

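	/* A backchannel reply is subject to the same flow control as a
	 * forward request: it must hold a congestion control credit
	 * before it can be transmitted.
	 */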
	if (!xprt_request_get_cong(xprt, rqst))
		return -EBADSLT;

	rc = rpcrdma_bc_marshal_reply(rqst);
	if (rc < 0)
		goto failed_marshal;

	if (rpcrdma_post_sends(r_xprt, req))
		goto drop_connection;
	return 0;

failed_marshal:
	if (rc != -ENOTCONN)
		return rc;
drop_connection:
	xprt_rdma_close(xprt);
	return -ENOTCONN;
}

/**
 * xprt_rdma_bc_destroy - Release resources for handling backchannel requests
 * @xprt: transport associated with these backchannel resources
 * @reqs: number of incoming requests to destroy; ignored
 */
void xprt_rdma_bc_destroy(struct rpc_xprt *xprt, unsigned int reqs)
{
	struct rpc_rqst *rqst, *tmp;

	spin_lock(&xprt->bc_pa_lock);
	list_for_each_entry_safe(rqst, tmp, &xprt->bc_pa_list, rq_bc_pa_list) {
		list_del(&rqst->rq_bc_pa_list);
		spin_unlock(&xprt->bc_pa_lock);

		rpcrdma_req_destroy(rpcr_to_rdmar(rqst));

		spin_lock(&xprt->bc_pa_lock);
	}
	spin_unlock(&xprt->bc_pa_lock);
}

/**
 * xprt_rdma_bc_free_rqst - Release a backchannel rqst
 * @rqst: request to release
 */
void xprt_rdma_bc_free_rqst(struct rpc_rqst *rqst)
{
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
	struct rpc_xprt *xprt = rqst->rq_xprt;

	rpcrdma_recv_buffer_put(req->rl_reply);
	req->rl_reply = NULL;

	spin_lock(&xprt->bc_pa_lock);
	list_add_tail(&rqst->rq_bc_pa_list, &xprt->bc_pa_list);
	spin_unlock(&xprt->bc_pa_lock);
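	/* Drop the reference taken in rpcrdma_bc_receive_call() when
	 * this rqst was queued for the callback service.
	 */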
	xprt_put(xprt);
}

static struct rpc_rqst *rpcrdma_bc_rqst_get(struct rpcrdma_xprt *r_xprt)
{
	struct rpc_xprt *xprt = &r_xprt->rx_xprt;
	struct rpcrdma_req *req;
	struct rpc_rqst *rqst;
	size_t size;

	spin_lock(&xprt->bc_pa_lock);
	rqst = list_first_entry_or_null(&xprt->bc_pa_list, struct rpc_rqst,
					rq_bc_pa_list);
	if (!rqst)
		goto create_req;
	list_del(&rqst->rq_bc_pa_list);
	spin_unlock(&xprt->bc_pa_lock);
	return rqst;

create_req:
	spin_unlock(&xprt->bc_pa_lock);

	/* Set a limit to prevent a remote from overrunning our resources.
	 */
	if (xprt->bc_alloc_count >= RPCRDMA_BACKWARD_WRS)
		return NULL;

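	/* A backchannel reply is always sent inline, so the send buffer
	 * need be no larger than the inline threshold, capped at one page.
	 */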
	size = min_t(size_t, r_xprt->rx_ep->re_inline_recv, PAGE_SIZE);
	req = rpcrdma_req_create(r_xprt, size, GFP_KERNEL);
	if (!req)
		return NULL;
	if (rpcrdma_req_setup(r_xprt, req)) {
		rpcrdma_req_destroy(req);
		return NULL;
	}

	xprt->bc_alloc_count++;
	rqst = &req->rl_slot;
	rqst->rq_xprt = xprt;
	__set_bit(RPC_BC_PA_IN_USE, &rqst->rq_bc_pa_state);
	xdr_buf_init(&rqst->rq_snd_buf, rdmab_data(req->rl_sendbuf), size);
	return rqst;
}

/**
 * rpcrdma_bc_receive_call - Handle a backward direction call
 * @r_xprt: transport receiving the call
 * @rep: receive buffer containing the call
 *
 * Operational assumptions:
 *    o Backchannel credits are ignored, just as the NFS server
 *      forechannel currently does
 *    o The ULP manages a replay cache (e.g., NFSv4.1 sessions).
 *      No replay detection is done at the transport level
 */
void rpcrdma_bc_receive_call(struct rpcrdma_xprt *r_xprt,
			     struct rpcrdma_rep *rep)
{
	struct rpc_xprt *xprt = &r_xprt->rx_xprt;
	struct svc_serv *bc_serv;
	struct rpcrdma_req *req;
	struct rpc_rqst *rqst;
	struct xdr_buf *buf;
	size_t size;
	__be32 *p;

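	/* By the time we get here, the stream position is at the start
	 * of the RPC call header: @p points at the XID, and @size is
	 * the number of call message bytes remaining in the receive
	 * buffer.
	 */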
	p = xdr_inline_decode(&rep->rr_stream, 0);
	size = xdr_stream_remaining(&rep->rr_stream);

#ifdef RPCRDMA_BACKCHANNEL_DEBUG
	pr_info("RPC: %s: callback XID %08x, length=%u\n",
		__func__, be32_to_cpup(p), size);
	pr_info("RPC: %s: %*ph\n", __func__, size, p);
#endif

	rqst = rpcrdma_bc_rqst_get(r_xprt);
	if (!rqst)
		goto out_overflow;

	rqst->rq_reply_bytes_recvd = 0;
	rqst->rq_xid = *p;

	rqst->rq_private_buf.len = size;

	buf = &rqst->rq_rcv_buf;
	memset(buf, 0, sizeof(*buf));
	buf->head[0].iov_base = p;
	buf->head[0].iov_len = size;
	buf->len = size;

	/* The receive buffer has to be hooked to the rpcrdma_req
	 * so that it is not released while the req is pointing
	 * to its buffer, and so that it can be reposted after
	 * the Upper Layer is done decoding it.
	 */
	req = rpcr_to_rdmar(rqst);
	req->rl_reply = rep;
	trace_xprtrdma_cb_call(rqst);

	/* Queue rqst for ULP's callback service */
	bc_serv = xprt->bc_serv;
	xprt_get(xprt);
	spin_lock(&bc_serv->sv_cb_lock);
	list_add(&rqst->rq_bc_list, &bc_serv->sv_cb_list);
	spin_unlock(&bc_serv->sv_cb_lock);

	wake_up(&bc_serv->sv_cb_waitq);

	r_xprt->rx_stats.bcall_count++;
	return;

out_overflow:
	pr_warn("RPC/RDMA backchannel overflow\n");
	xprt_force_disconnect(xprt);
	/* This receive buffer gets reposted automatically
	 * when the connection is re-established.
	 */
	return;
}