// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (c) 2014-2017 Oracle. All rights reserved.
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * verbs.c
 *
 * Encapsulates the major functions managing:
 *  o adapters
 *  o endpoints
 *  o connections
 *  o buffer memory
 */

#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/sunrpc/addr.h>
#include <linux/sunrpc/svc_rdma.h>
#include <linux/log2.h>

#include <asm-generic/barrier.h>
#include <asm/bitops.h>

#include <rdma/ib_cm.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>

/*
 * Globals/Macros
 */

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY RPCDBG_TRANS
#endif

/*
 * internal functions
 */
static int rpcrdma_sendctxs_create(struct rpcrdma_xprt *r_xprt);
static void rpcrdma_sendctxs_destroy(struct rpcrdma_xprt *r_xprt);
static void rpcrdma_sendctx_put_locked(struct rpcrdma_xprt *r_xprt,
                                       struct rpcrdma_sendctx *sc);
static int rpcrdma_reqs_setup(struct rpcrdma_xprt *r_xprt);
static void rpcrdma_reqs_reset(struct rpcrdma_xprt *r_xprt);
static void rpcrdma_rep_destroy(struct rpcrdma_rep *rep);
static void rpcrdma_reps_unmap(struct rpcrdma_xprt *r_xprt);
static void rpcrdma_mrs_create(struct rpcrdma_xprt *r_xprt);
static void rpcrdma_mrs_destroy(struct rpcrdma_xprt *r_xprt);
static void rpcrdma_ep_get(struct rpcrdma_ep *ep);
static int rpcrdma_ep_put(struct rpcrdma_ep *ep);
static struct rpcrdma_regbuf *
rpcrdma_regbuf_alloc(size_t size, enum dma_data_direction direction,
                     gfp_t flags);
static void rpcrdma_regbuf_dma_unmap(struct rpcrdma_regbuf *rb);
static void rpcrdma_regbuf_free(struct rpcrdma_regbuf *rb);

/* Wait for outstanding transport work to finish. ib_drain_qp
 * handles the drains in the wrong order for us, so open code
 * them here.
 */
static void rpcrdma_xprt_drain(struct rpcrdma_xprt *r_xprt)
{
        struct rpcrdma_ep *ep = r_xprt->rx_ep;
        struct rdma_cm_id *id = ep->re_id;

        /* Flush Receives, then wait for deferred Reply work
         * to complete.
         */
        ib_drain_rq(id->qp);

        /* Deferred Reply processing might have scheduled
         * local invalidations.
         */
        ib_drain_sq(id->qp);

        rpcrdma_ep_put(ep);
}

/**
 * rpcrdma_qp_event_handler - Handle one QP event (error notification)
 * @event: details of the event
 * @context: ep that owns QP where event occurred
 *
 * Called from the RDMA provider (device driver) possibly in an interrupt
 * context. The QP is always destroyed before the ID, so the ID will be
 * reliably available when this handler is invoked.
 */
static void rpcrdma_qp_event_handler(struct ib_event *event, void *context)
{
        struct rpcrdma_ep *ep = context;

        trace_xprtrdma_qp_event(ep, event);
}

/* Ensure xprt_force_disconnect() is invoked exactly once when a
 * connection is closed or lost. (The important thing is it needs
 * to be invoked "at least" once).
 */
static void rpcrdma_force_disconnect(struct rpcrdma_ep *ep)
{
        if (atomic_add_unless(&ep->re_force_disconnect, 1, 1))
                xprt_force_disconnect(ep->re_xprt);
}
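
/* For reference: atomic_add_unless(&v, 1, 1) increments @v only while
 * it is not already 1, and returns non-zero only when the increment was
 * actually performed. Since re_force_disconnect starts at zero for each
 * new endpoint, the first path to trip it calls xprt_force_disconnect()
 * and any later call is a no-op for the rest of this ep's lifetime.
 */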

/**
 * rpcrdma_flush_disconnect - Disconnect on flushed completion
 * @r_xprt: transport to disconnect
 * @wc: work completion entry
 *
 * Must be called in process context.
 */
void rpcrdma_flush_disconnect(struct rpcrdma_xprt *r_xprt, struct ib_wc *wc)
{
        if (wc->status != IB_WC_SUCCESS)
                rpcrdma_force_disconnect(r_xprt->rx_ep);
}

/**
 * rpcrdma_wc_send - Invoked by RDMA provider for each polled Send WC
 * @cq: completion queue
 * @wc: WCE for a completed Send WR
 *
 */
static void rpcrdma_wc_send(struct ib_cq *cq, struct ib_wc *wc)
{
        struct ib_cqe *cqe = wc->wr_cqe;
        struct rpcrdma_sendctx *sc =
                container_of(cqe, struct rpcrdma_sendctx, sc_cqe);
        struct rpcrdma_xprt *r_xprt = cq->cq_context;

        /* WARNING: Only wr_cqe and status are reliable at this point */
        trace_xprtrdma_wc_send(sc, wc);
        rpcrdma_sendctx_put_locked(r_xprt, sc);
        rpcrdma_flush_disconnect(r_xprt, wc);
}

/**
 * rpcrdma_wc_receive - Invoked by RDMA provider for each polled Receive WC
 * @cq: completion queue
 * @wc: WCE for a completed Receive WR
 *
 */
static void rpcrdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc)
{
        struct ib_cqe *cqe = wc->wr_cqe;
        struct rpcrdma_rep *rep = container_of(cqe, struct rpcrdma_rep,
                                               rr_cqe);
        struct rpcrdma_xprt *r_xprt = cq->cq_context;

        /* WARNING: Only wr_cqe and status are reliable at this point */
        trace_xprtrdma_wc_receive(wc);
        --r_xprt->rx_ep->re_receive_count;
        if (wc->status != IB_WC_SUCCESS)
                goto out_flushed;

        /* status == SUCCESS means all fields in wc are trustworthy */
        rpcrdma_set_xdrlen(&rep->rr_hdrbuf, wc->byte_len);
        rep->rr_wc_flags = wc->wc_flags;
        rep->rr_inv_rkey = wc->ex.invalidate_rkey;

        ib_dma_sync_single_for_cpu(rdmab_device(rep->rr_rdmabuf),
                                   rdmab_addr(rep->rr_rdmabuf),
                                   wc->byte_len, DMA_FROM_DEVICE);

        rpcrdma_reply_handler(rep);
        return;

out_flushed:
        rpcrdma_flush_disconnect(r_xprt, wc);
        rpcrdma_rep_destroy(rep);
}

static void rpcrdma_update_cm_private(struct rpcrdma_ep *ep,
                                      struct rdma_conn_param *param)
{
        const struct rpcrdma_connect_private *pmsg = param->private_data;
        unsigned int rsize, wsize;

        /* Default settings for RPC-over-RDMA Version One */
        ep->re_implicit_roundup = xprt_rdma_pad_optimize;
        rsize = RPCRDMA_V1_DEF_INLINE_SIZE;
        wsize = RPCRDMA_V1_DEF_INLINE_SIZE;

        if (pmsg &&
            pmsg->cp_magic == rpcrdma_cmp_magic &&
            pmsg->cp_version == RPCRDMA_CMP_VERSION) {
                ep->re_implicit_roundup = true;
                rsize = rpcrdma_decode_buffer_size(pmsg->cp_send_size);
                wsize = rpcrdma_decode_buffer_size(pmsg->cp_recv_size);
        }

        if (rsize < ep->re_inline_recv)
                ep->re_inline_recv = rsize;
        if (wsize < ep->re_inline_send)
                ep->re_inline_send = wsize;

        rpcrdma_set_max_header_sizes(ep);
}
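
/* Illustrative values only: if this client's inline thresholds default
 * to 4096 bytes but the server's private message advertises a 1024-byte
 * receive size, re_inline_send drops to 1024 so inline Sends never
 * exceed what the peer has prepared to receive. The thresholds above
 * are only ever lowered here, never raised.
 */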

/**
 * rpcrdma_cm_event_handler - Handle RDMA CM events
 * @id: rdma_cm_id on which an event has occurred
 * @event: details of the event
 *
 * Called with @id's mutex held. Returns 1 if caller should
 * destroy @id, otherwise 0.
 */
static int
rpcrdma_cm_event_handler(struct rdma_cm_id *id, struct rdma_cm_event *event)
{
        struct sockaddr *sap = (struct sockaddr *)&id->route.addr.dst_addr;
        struct rpcrdma_ep *ep = id->context;

        might_sleep();

        switch (event->event) {
        case RDMA_CM_EVENT_ADDR_RESOLVED:
        case RDMA_CM_EVENT_ROUTE_RESOLVED:
                ep->re_async_rc = 0;
                complete(&ep->re_done);
                return 0;
        case RDMA_CM_EVENT_ADDR_ERROR:
                ep->re_async_rc = -EPROTO;
                complete(&ep->re_done);
                return 0;
        case RDMA_CM_EVENT_ROUTE_ERROR:
                ep->re_async_rc = -ENETUNREACH;
                complete(&ep->re_done);
                return 0;
        case RDMA_CM_EVENT_DEVICE_REMOVAL:
                pr_info("rpcrdma: removing device %s for %pISpc\n",
                        ep->re_id->device->name, sap);
                fallthrough;
        case RDMA_CM_EVENT_ADDR_CHANGE:
                ep->re_connect_status = -ENODEV;
                goto disconnected;
        case RDMA_CM_EVENT_ESTABLISHED:
                rpcrdma_ep_get(ep);
                ep->re_connect_status = 1;
                rpcrdma_update_cm_private(ep, &event->param.conn);
                trace_xprtrdma_inline_thresh(ep);
                wake_up_all(&ep->re_connect_wait);
                break;
        case RDMA_CM_EVENT_CONNECT_ERROR:
                ep->re_connect_status = -ENOTCONN;
                goto wake_connect_worker;
        case RDMA_CM_EVENT_UNREACHABLE:
                ep->re_connect_status = -ENETUNREACH;
                goto wake_connect_worker;
        case RDMA_CM_EVENT_REJECTED:
                dprintk("rpcrdma: connection to %pISpc rejected: %s\n",
                        sap, rdma_reject_msg(id, event->status));
                ep->re_connect_status = -ECONNREFUSED;
                if (event->status == IB_CM_REJ_STALE_CONN)
                        ep->re_connect_status = -ENOTCONN;
wake_connect_worker:
                wake_up_all(&ep->re_connect_wait);
                return 0;
        case RDMA_CM_EVENT_DISCONNECTED:
                ep->re_connect_status = -ECONNABORTED;
disconnected:
                rpcrdma_force_disconnect(ep);
                return rpcrdma_ep_put(ep);
        default:
                break;
        }

        dprintk("RPC: %s: %pISpc on %s/frwr: %s\n", __func__, sap,
                ep->re_id->device->name, rdma_event_msg(event->event));
        return 0;
}

static struct rdma_cm_id *rpcrdma_create_id(struct rpcrdma_xprt *r_xprt,
                                            struct rpcrdma_ep *ep)
{
        unsigned long wtimeout = msecs_to_jiffies(RDMA_RESOLVE_TIMEOUT) + 1;
        struct rpc_xprt *xprt = &r_xprt->rx_xprt;
        struct rdma_cm_id *id;
        int rc;

        init_completion(&ep->re_done);

        id = rdma_create_id(xprt->xprt_net, rpcrdma_cm_event_handler, ep,
                            RDMA_PS_TCP, IB_QPT_RC);
        if (IS_ERR(id))
                return id;

        ep->re_async_rc = -ETIMEDOUT;
        rc = rdma_resolve_addr(id, NULL, (struct sockaddr *)&xprt->addr,
                               RDMA_RESOLVE_TIMEOUT);
        if (rc)
                goto out;
        rc = wait_for_completion_interruptible_timeout(&ep->re_done, wtimeout);
        if (rc < 0)
                goto out;

        rc = ep->re_async_rc;
        if (rc)
                goto out;

        ep->re_async_rc = -ETIMEDOUT;
        rc = rdma_resolve_route(id, RDMA_RESOLVE_TIMEOUT);
        if (rc)
                goto out;
        rc = wait_for_completion_interruptible_timeout(&ep->re_done, wtimeout);
        if (rc < 0)
                goto out;
        rc = ep->re_async_rc;
        if (rc)
                goto out;

        return id;

out:
        rdma_destroy_id(id);
        return ERR_PTR(rc);
}
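
/* A sketch of the flow above: address and route resolution each
 * complete asynchronously via rpcrdma_cm_event_handler(), which sets
 * re_async_rc and completes re_done. re_async_rc is primed with
 * -ETIMEDOUT before each step, so if the wait expires without a CM
 * event the function reports a timeout rather than a stale status
 * from the previous step.
 */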

static void rpcrdma_ep_destroy(struct kref *kref)
{
        struct rpcrdma_ep *ep = container_of(kref, struct rpcrdma_ep, re_kref);

        if (ep->re_id->qp) {
                rdma_destroy_qp(ep->re_id);
                ep->re_id->qp = NULL;
        }

        if (ep->re_attr.recv_cq)
                ib_free_cq(ep->re_attr.recv_cq);
        ep->re_attr.recv_cq = NULL;
        if (ep->re_attr.send_cq)
                ib_free_cq(ep->re_attr.send_cq);
        ep->re_attr.send_cq = NULL;

        if (ep->re_pd)
                ib_dealloc_pd(ep->re_pd);
        ep->re_pd = NULL;

        kfree(ep);
        module_put(THIS_MODULE);
}

static noinline void rpcrdma_ep_get(struct rpcrdma_ep *ep)
{
        kref_get(&ep->re_kref);
}

/* Returns:
 *     %0 if @ep still has a positive kref count, or
 *     %1 if @ep was destroyed successfully.
 */
static noinline int rpcrdma_ep_put(struct rpcrdma_ep *ep)
{
        return kref_put(&ep->re_kref, rpcrdma_ep_destroy);
}
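
/* Rough sketch of how re_kref is used in this file:
 * rpcrdma_ep_create() initializes it to one (the transport's
 * reference); rpcrdma_xprt_connect() takes another to cover posted
 * Receives, which rpcrdma_xprt_drain() drops; the ESTABLISHED CM event
 * takes one that the disconnected path drops. The last put, typically
 * in rpcrdma_xprt_disconnect(), frees the endpoint via
 * rpcrdma_ep_destroy().
 */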

static int rpcrdma_ep_create(struct rpcrdma_xprt *r_xprt)
{
        struct rpcrdma_connect_private *pmsg;
        struct ib_device *device;
        struct rdma_cm_id *id;
        struct rpcrdma_ep *ep;
        int rc;

        ep = kzalloc(sizeof(*ep), GFP_NOFS);
        if (!ep)
                return -ENOTCONN;
        ep->re_xprt = &r_xprt->rx_xprt;
        kref_init(&ep->re_kref);

        id = rpcrdma_create_id(r_xprt, ep);
        if (IS_ERR(id)) {
                kfree(ep);
                return PTR_ERR(id);
        }
        __module_get(THIS_MODULE);
        device = id->device;
        ep->re_id = id;

        ep->re_max_requests = r_xprt->rx_xprt.max_reqs;
        ep->re_inline_send = xprt_rdma_max_inline_write;
        ep->re_inline_recv = xprt_rdma_max_inline_read;
        rc = frwr_query_device(ep, device);
        if (rc)
                goto out_destroy;

        r_xprt->rx_buf.rb_max_requests = cpu_to_be32(ep->re_max_requests);

        ep->re_attr.event_handler = rpcrdma_qp_event_handler;
        ep->re_attr.qp_context = ep;
        ep->re_attr.srq = NULL;
        ep->re_attr.cap.max_inline_data = 0;
        ep->re_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
        ep->re_attr.qp_type = IB_QPT_RC;
        ep->re_attr.port_num = ~0;

        dprintk("RPC: %s: requested max: dtos: send %d recv %d; "
                "iovs: send %d recv %d\n",
                __func__,
                ep->re_attr.cap.max_send_wr,
                ep->re_attr.cap.max_recv_wr,
                ep->re_attr.cap.max_send_sge,
                ep->re_attr.cap.max_recv_sge);

        ep->re_send_batch = ep->re_max_requests >> 3;
        ep->re_send_count = ep->re_send_batch;
        init_waitqueue_head(&ep->re_connect_wait);

        ep->re_attr.send_cq = ib_alloc_cq_any(device, r_xprt,
                                              ep->re_attr.cap.max_send_wr,
                                              IB_POLL_WORKQUEUE);
        if (IS_ERR(ep->re_attr.send_cq)) {
                rc = PTR_ERR(ep->re_attr.send_cq);
                ep->re_attr.send_cq = NULL;
                goto out_destroy;
        }

        ep->re_attr.recv_cq = ib_alloc_cq_any(device, r_xprt,
                                              ep->re_attr.cap.max_recv_wr,
                                              IB_POLL_WORKQUEUE);
        if (IS_ERR(ep->re_attr.recv_cq)) {
                rc = PTR_ERR(ep->re_attr.recv_cq);
                ep->re_attr.recv_cq = NULL;
                goto out_destroy;
        }
        ep->re_receive_count = 0;

        /* Initialize cma parameters */
        memset(&ep->re_remote_cma, 0, sizeof(ep->re_remote_cma));

        /* Prepare RDMA-CM private message */
        pmsg = &ep->re_cm_private;
        pmsg->cp_magic = rpcrdma_cmp_magic;
        pmsg->cp_version = RPCRDMA_CMP_VERSION;
        pmsg->cp_flags |= RPCRDMA_CMP_F_SND_W_INV_OK;
        pmsg->cp_send_size = rpcrdma_encode_buffer_size(ep->re_inline_send);
        pmsg->cp_recv_size = rpcrdma_encode_buffer_size(ep->re_inline_recv);
        ep->re_remote_cma.private_data = pmsg;
        ep->re_remote_cma.private_data_len = sizeof(*pmsg);

        /* Client offers RDMA Read but does not initiate */
        ep->re_remote_cma.initiator_depth = 0;
        ep->re_remote_cma.responder_resources =
                min_t(int, U8_MAX, device->attrs.max_qp_rd_atom);

        /* Limit transport retries so client can detect server
         * GID changes quickly. RPC layer handles re-establishing
         * transport connection and retransmission.
         */
        ep->re_remote_cma.retry_count = 6;

        /* RPC-over-RDMA handles its own flow control. In addition,
         * make all RNR NAKs visible so we know that RPC-over-RDMA
         * flow control is working correctly (no NAKs should be seen).
         */
        ep->re_remote_cma.flow_control = 0;
        ep->re_remote_cma.rnr_retry_count = 0;

        ep->re_pd = ib_alloc_pd(device, 0);
        if (IS_ERR(ep->re_pd)) {
                rc = PTR_ERR(ep->re_pd);
                ep->re_pd = NULL;
                goto out_destroy;
        }

        rc = rdma_create_qp(id, ep->re_pd, &ep->re_attr);
        if (rc)
                goto out_destroy;

        r_xprt->rx_ep = ep;
        return 0;

out_destroy:
        rpcrdma_ep_put(ep);
        rdma_destroy_id(id);
        return rc;
}

/**
 * rpcrdma_xprt_connect - Connect an unconnected transport
 * @r_xprt: controlling transport instance
 *
 * Returns 0 on success or a negative errno.
 */
int rpcrdma_xprt_connect(struct rpcrdma_xprt *r_xprt)
{
        struct rpc_xprt *xprt = &r_xprt->rx_xprt;
        struct rpcrdma_ep *ep;
        int rc;

        rc = rpcrdma_ep_create(r_xprt);
        if (rc)
                return rc;
        ep = r_xprt->rx_ep;

        xprt_clear_connected(xprt);
        rpcrdma_reset_cwnd(r_xprt);

        /* Bump the ep's reference count while there are
         * outstanding Receives.
         */
        rpcrdma_ep_get(ep);
        rpcrdma_post_recvs(r_xprt, 1, true);

        rc = rdma_connect(ep->re_id, &ep->re_remote_cma);
        if (rc)
                goto out;

        if (xprt->reestablish_timeout < RPCRDMA_INIT_REEST_TO)
                xprt->reestablish_timeout = RPCRDMA_INIT_REEST_TO;
        wait_event_interruptible(ep->re_connect_wait,
                                 ep->re_connect_status != 0);
        if (ep->re_connect_status <= 0) {
                rc = ep->re_connect_status;
                goto out;
        }

        rc = rpcrdma_sendctxs_create(r_xprt);
        if (rc) {
                rc = -ENOTCONN;
                goto out;
        }

        rc = rpcrdma_reqs_setup(r_xprt);
        if (rc) {
                rc = -ENOTCONN;
                goto out;
        }
        rpcrdma_mrs_create(r_xprt);

out:
        trace_xprtrdma_connect(r_xprt, rc);
        return rc;
}

/**
 * rpcrdma_xprt_disconnect - Disconnect underlying transport
 * @r_xprt: controlling transport instance
 *
 * Caller serializes. Either the transport send lock is held,
 * or we're being called to destroy the transport.
 *
 * On return, @r_xprt is completely divested of all hardware
 * resources and prepared for the next ->connect operation.
 */
void rpcrdma_xprt_disconnect(struct rpcrdma_xprt *r_xprt)
{
        struct rpcrdma_ep *ep = r_xprt->rx_ep;
        struct rdma_cm_id *id;
        int rc;

        if (!ep)
                return;

        id = ep->re_id;
        rc = rdma_disconnect(id);
        trace_xprtrdma_disconnect(r_xprt, rc);

        rpcrdma_xprt_drain(r_xprt);
        rpcrdma_reps_unmap(r_xprt);
        rpcrdma_reqs_reset(r_xprt);
        rpcrdma_mrs_destroy(r_xprt);
        rpcrdma_sendctxs_destroy(r_xprt);

        if (rpcrdma_ep_put(ep))
                rdma_destroy_id(id);

        r_xprt->rx_ep = NULL;
}

/* Fixed-size circular FIFO queue. This implementation is wait-free and
 * lock-free.
 *
 * Consumer is the code path that posts Sends. This path dequeues a
 * sendctx for use by a Send operation. Multiple consumer threads
 * are serialized by the RPC transport lock, which allows only one
 * ->send_request call at a time.
 *
 * Producer is the code path that handles Send completions. This path
 * enqueues a sendctx that has been completed. Multiple producer
 * threads are serialized by the ib_poll_cq() function.
 */
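
/* In concrete terms, as implemented below: the send path calls
 * rpcrdma_sendctx_get_locked(), which advances rb_sc_head and hands out
 * the context at the new head; each Send completion calls
 * rpcrdma_sendctx_put_locked(), which walks rb_sc_tail forward past the
 * contexts that have completed. No spinlock guards the indices: the
 * consumer relies on the transport send lock, the producer on
 * completion-queue serialization, plus the READ_ONCE and
 * smp_store_release pairing on rb_sc_tail.
 */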

/* rpcrdma_sendctxs_destroy() assumes caller has already quiesced
 * queue activity, and rpcrdma_xprt_drain has flushed all remaining
 * Send requests.
 */
static void rpcrdma_sendctxs_destroy(struct rpcrdma_xprt *r_xprt)
{
        struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
        unsigned long i;

        if (!buf->rb_sc_ctxs)
                return;
        for (i = 0; i <= buf->rb_sc_last; i++)
                kfree(buf->rb_sc_ctxs[i]);
        kfree(buf->rb_sc_ctxs);
        buf->rb_sc_ctxs = NULL;
}

static struct rpcrdma_sendctx *rpcrdma_sendctx_create(struct rpcrdma_ep *ep)
{
        struct rpcrdma_sendctx *sc;

        sc = kzalloc(struct_size(sc, sc_sges, ep->re_attr.cap.max_send_sge),
                     GFP_KERNEL);
        if (!sc)
                return NULL;

        sc->sc_cqe.done = rpcrdma_wc_send;
        return sc;
}

static int rpcrdma_sendctxs_create(struct rpcrdma_xprt *r_xprt)
{
        struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
        struct rpcrdma_sendctx *sc;
        unsigned long i;

        /* Maximum number of concurrent outstanding Send WRs. Capping
         * the circular queue size stops Send Queue overflow by causing
         * the ->send_request call to fail temporarily before too many
         * Sends are posted.
         */
        i = r_xprt->rx_ep->re_max_requests + RPCRDMA_MAX_BC_REQUESTS;
        buf->rb_sc_ctxs = kcalloc(i, sizeof(sc), GFP_KERNEL);
        if (!buf->rb_sc_ctxs)
                return -ENOMEM;

        buf->rb_sc_last = i - 1;
        for (i = 0; i <= buf->rb_sc_last; i++) {
                sc = rpcrdma_sendctx_create(r_xprt->rx_ep);
                if (!sc)
                        return -ENOMEM;

                buf->rb_sc_ctxs[i] = sc;
        }

        buf->rb_sc_head = 0;
        buf->rb_sc_tail = 0;
        return 0;
}

/* The sendctx queue is not guaranteed to have a size that is a
 * power of two, thus the helpers in circ_buf.h cannot be used.
 * The other option is to use modulus (%), which can be expensive.
 */
static unsigned long rpcrdma_sendctx_next(struct rpcrdma_buffer *buf,
                                          unsigned long item)
{
        return likely(item < buf->rb_sc_last) ? item + 1 : 0;
}
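
/* Worked example of the wrap-around above: with rb_sc_last == 3, the
 * valid indices are 0..3 and successive calls yield 1, 2, 3, 0, 1, and
 * so on, the same result as (item + 1) % (rb_sc_last + 1) without the
 * division.
 */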
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) * rpcrdma_sendctx_get_locked - Acquire a send context
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) * @r_xprt: controlling transport instance
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) * Returns pointer to a free send completion context; or NULL if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) * the queue is empty.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) * Usage: Called to acquire an SGE array before preparing a Send WR.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) * The caller serializes calls to this function (per transport), and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) * provides an effective memory barrier that flushes the new value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) * of rb_sc_head.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) struct rpcrdma_sendctx *rpcrdma_sendctx_get_locked(struct rpcrdma_xprt *r_xprt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) struct rpcrdma_sendctx *sc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) unsigned long next_head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) next_head = rpcrdma_sendctx_next(buf, buf->rb_sc_head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) if (next_head == READ_ONCE(buf->rb_sc_tail))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) goto out_emptyq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) /* ORDER: item must be accessed _before_ head is updated */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) sc = buf->rb_sc_ctxs[next_head];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) /* Releasing the lock in the caller acts as a memory
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) * barrier that flushes rb_sc_head.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) buf->rb_sc_head = next_head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) return sc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) out_emptyq:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) /* The queue is "empty" if there have not been enough Send
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) * completions recently. This is a sign the Send Queue is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) * backing up. Cause the caller to pause and try again.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) xprt_wait_for_buffer_space(&r_xprt->rx_xprt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) r_xprt->rx_stats.empty_sendctx_q++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) * rpcrdma_sendctx_put_locked - Release a send context
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) * @r_xprt: controlling transport instance
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) * @sc: send context to release
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) * Usage: Called from Send completion to return a sendctxt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) * to the queue.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) * The caller serializes calls to this function (per transport).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) static void rpcrdma_sendctx_put_locked(struct rpcrdma_xprt *r_xprt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) struct rpcrdma_sendctx *sc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) unsigned long next_tail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) /* Unmap SGEs of previously completed but unsignaled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) * Sends by walking up the queue until @sc is found.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) next_tail = buf->rb_sc_tail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) next_tail = rpcrdma_sendctx_next(buf, next_tail);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) /* ORDER: item must be accessed _before_ tail is updated */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) rpcrdma_sendctx_unmap(buf->rb_sc_ctxs[next_tail]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) } while (buf->rb_sc_ctxs[next_tail] != sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) /* Paired with READ_ONCE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) smp_store_release(&buf->rb_sc_tail, next_tail);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) xprt_write_space(&r_xprt->rx_xprt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) rpcrdma_mrs_create(struct rpcrdma_xprt *r_xprt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) struct rpcrdma_ep *ep = r_xprt->rx_ep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) unsigned int count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) for (count = 0; count < ep->re_max_rdma_segs; count++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) struct rpcrdma_mr *mr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) mr = kzalloc(sizeof(*mr), GFP_NOFS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) if (!mr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) rc = frwr_mr_init(r_xprt, mr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) kfree(mr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) spin_lock(&buf->rb_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) rpcrdma_mr_push(mr, &buf->rb_mrs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) list_add(&mr->mr_all, &buf->rb_all_mrs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) spin_unlock(&buf->rb_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) r_xprt->rx_stats.mrs_allocated += count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) trace_xprtrdma_createmrs(r_xprt, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800)
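/* Note on the two lists populated above: rb_mrs is the free list that
 * rpcrdma_mr_get() pops from under rb_lock, while rb_all_mrs tracks every
 * MR ever created so that rpcrdma_mrs_destroy() can release MRs even when
 * they are attached to a request.  A rough sketch of an MR's lifetime
 * under those assumptions:
 *
 *	rpcrdma_mrs_create()	kzalloc + frwr_mr_init
 *	    -> rb_mrs, rb_all_mrs	(both under rb_lock)
 *	rpcrdma_mr_get()	pop from rb_mrs
 *	rpcrdma_mr_put()	push onto mr->mr_req->rl_free_mrs
 *	rpcrdma_mrs_destroy()	walk rb_all_mrs, frwr_release_mr()
 */
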
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) rpcrdma_mr_refresh_worker(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) struct rpcrdma_buffer *buf = container_of(work, struct rpcrdma_buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) rb_refresh_worker);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) struct rpcrdma_xprt *r_xprt = container_of(buf, struct rpcrdma_xprt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) rx_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) rpcrdma_mrs_create(r_xprt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) xprt_write_space(&r_xprt->rx_xprt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) * rpcrdma_mrs_refresh - Wake the MR refresh worker
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) * @r_xprt: controlling transport instance
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) void rpcrdma_mrs_refresh(struct rpcrdma_xprt *r_xprt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) struct rpcrdma_ep *ep = r_xprt->rx_ep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) /* If there is no underlying connection, there is no point
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) * in waking the refresh worker.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) if (ep->re_connect_status == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) /* The work is scheduled on a WQ_MEM_RECLAIM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) * workqueue in order to prevent MR allocation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) * from recursing into NFS during direct reclaim.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) queue_work(xprtiod_workqueue, &buf->rb_refresh_worker);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) * rpcrdma_req_create - Allocate an rpcrdma_req object
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) * @r_xprt: controlling r_xprt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) * @size: initial size, in bytes, of send and receive buffers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) * @flags: GFP flags passed to memory allocators
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) * Returns an allocated and fully initialized rpcrdma_req or NULL.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) struct rpcrdma_req *rpcrdma_req_create(struct rpcrdma_xprt *r_xprt, size_t size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) gfp_t flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) struct rpcrdma_buffer *buffer = &r_xprt->rx_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) struct rpcrdma_req *req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) req = kzalloc(sizeof(*req), flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) if (req == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) goto out1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) req->rl_sendbuf = rpcrdma_regbuf_alloc(size, DMA_TO_DEVICE, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) if (!req->rl_sendbuf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) goto out2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) req->rl_recvbuf = rpcrdma_regbuf_alloc(size, DMA_NONE, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) if (!req->rl_recvbuf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) goto out3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) INIT_LIST_HEAD(&req->rl_free_mrs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) INIT_LIST_HEAD(&req->rl_registered);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) spin_lock(&buffer->rb_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) list_add(&req->rl_all, &buffer->rb_allreqs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) spin_unlock(&buffer->rb_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) return req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) out3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) kfree(req->rl_sendbuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) out2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) kfree(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) out1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) * rpcrdma_req_setup - Per-connection instance setup of an rpcrdma_req object
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) * @r_xprt: controlling transport instance
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) * @req: rpcrdma_req object to set up
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) * Returns zero on success, and a negative errno on failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) int rpcrdma_req_setup(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) struct rpcrdma_regbuf *rb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) size_t maxhdrsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) /* Compute maximum header buffer size in bytes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) maxhdrsize = rpcrdma_fixed_maxsz + 3 +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) r_xprt->rx_ep->re_max_rdma_segs * rpcrdma_readchunk_maxsz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) maxhdrsize *= sizeof(__be32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) rb = rpcrdma_regbuf_alloc(__roundup_pow_of_two(maxhdrsize),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) DMA_TO_DEVICE, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) if (!rb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) if (!__rpcrdma_regbuf_dma_map(r_xprt, rb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) goto out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) req->rl_rdmabuf = rb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) xdr_buf_init(&req->rl_hdrbuf, rdmab_data(rb), rdmab_length(rb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) out_free:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) rpcrdma_regbuf_free(rb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909)
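/* Worked example of the header-size computation above, using purely
 * illustrative values (the real constants live in the xprtrdma headers):
 * assuming rpcrdma_fixed_maxsz = 6, rpcrdma_readchunk_maxsz = 6 and
 * re_max_rdma_segs = 8,
 *
 *	maxhdrsize = 6 + 3 + 8 * 6	= 57 XDR words
 *	maxhdrsize *= sizeof(__be32)	= 228 bytes
 *	__roundup_pow_of_two(228)	= 256-byte rdmabuf
 *
 * The "+ 3" presumably covers the three chunk-list items in the
 * transport header; that reading is an assumption, not stated here.
 */
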
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) /* ASSUMPTION: the rb_allreqs list is stable for the duration,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) * and thus can be walked without holding rb_lock. E.g., the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) * caller is holding the transport send lock to exclude
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) * device removal or disconnection.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) static int rpcrdma_reqs_setup(struct rpcrdma_xprt *r_xprt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) struct rpcrdma_req *req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) list_for_each_entry(req, &buf->rb_allreqs, rl_all) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) rc = rpcrdma_req_setup(r_xprt, req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) static void rpcrdma_req_reset(struct rpcrdma_req *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) /* Credits are valid for only one connection */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) req->rl_slot.rq_cong = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) rpcrdma_regbuf_free(req->rl_rdmabuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) req->rl_rdmabuf = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) rpcrdma_regbuf_dma_unmap(req->rl_sendbuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) rpcrdma_regbuf_dma_unmap(req->rl_recvbuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) frwr_reset(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) /* ASSUMPTION: the rb_allreqs list is stable for the duration,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) * and thus can be walked without holding rb_lock. E.g., the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) * caller is holding the transport send lock to exclude
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) * device removal or disconnection.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) static void rpcrdma_reqs_reset(struct rpcrdma_xprt *r_xprt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) struct rpcrdma_req *req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) list_for_each_entry(req, &buf->rb_allreqs, rl_all)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) rpcrdma_req_reset(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) /* No locking needed here. This function is called only by the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) * Receive completion handler.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) static noinline
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) struct rpcrdma_rep *rpcrdma_rep_create(struct rpcrdma_xprt *r_xprt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) bool temp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) struct rpcrdma_rep *rep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) rep = kzalloc(sizeof(*rep), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) if (rep == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) rep->rr_rdmabuf = rpcrdma_regbuf_alloc(r_xprt->rx_ep->re_inline_recv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) DMA_FROM_DEVICE, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) if (!rep->rr_rdmabuf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) goto out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) if (!rpcrdma_regbuf_dma_map(r_xprt, rep->rr_rdmabuf))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) goto out_free_regbuf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) xdr_buf_init(&rep->rr_hdrbuf, rdmab_data(rep->rr_rdmabuf),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) rdmab_length(rep->rr_rdmabuf));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) rep->rr_cqe.done = rpcrdma_wc_receive;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) rep->rr_rxprt = r_xprt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) rep->rr_recv_wr.next = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) rep->rr_recv_wr.wr_cqe = &rep->rr_cqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) rep->rr_recv_wr.sg_list = &rep->rr_rdmabuf->rg_iov;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) rep->rr_recv_wr.num_sge = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) rep->rr_temp = temp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) list_add(&rep->rr_all, &r_xprt->rx_buf.rb_all_reps);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) return rep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) out_free_regbuf:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) rpcrdma_regbuf_free(rep->rr_rdmabuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) out_free:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) kfree(rep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997)
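/* The rr_recv_wr initialized above is not posted here: rpcrdma_post_recvs()
 * further down chains it into a batch (rep->rr_recv_wr.next = wr;
 * wr = &rep->rr_recv_wr;) and hands the whole chain to ib_post_recv() in
 * one call.  Each Receive carries a single SGE pointing at the rep's
 * rdmabuf, hence num_sge = 1.
 */
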
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) /* No locking needed here. This function is invoked only by the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) * Receive completion handler, or during transport shutdown.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) static void rpcrdma_rep_destroy(struct rpcrdma_rep *rep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) list_del(&rep->rr_all);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) rpcrdma_regbuf_free(rep->rr_rdmabuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) kfree(rep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) static struct rpcrdma_rep *rpcrdma_rep_get_locked(struct rpcrdma_buffer *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) struct llist_node *node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) /* Calls to llist_del_first are required to be serialized */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) node = llist_del_first(&buf->rb_free_reps);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) if (!node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) return llist_entry(node, struct rpcrdma_rep, rr_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) static void rpcrdma_rep_put(struct rpcrdma_buffer *buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) struct rpcrdma_rep *rep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) llist_add(&rep->rr_node, &buf->rb_free_reps);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024)
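/* Sketch of the lock-free discipline assumed for rb_free_reps: llist_add()
 * may be called concurrently from any context that returns a rep, but
 * llist_del_first() is safe only if its callers are serialized, which is
 * why rpcrdma_rep_get_locked() carries the comment above.  Roughly:
 *
 *	producers (any context):	llist_add(&rep->rr_node, &rb_free_reps)
 *	single consumer (serialized):	llist_del_first(&rb_free_reps)
 */
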
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) static void rpcrdma_reps_unmap(struct rpcrdma_xprt *r_xprt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) struct rpcrdma_rep *rep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) list_for_each_entry(rep, &buf->rb_all_reps, rr_all) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) rpcrdma_regbuf_dma_unmap(rep->rr_rdmabuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) rep->rr_temp = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) static void rpcrdma_reps_destroy(struct rpcrdma_buffer *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) struct rpcrdma_rep *rep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) while ((rep = rpcrdma_rep_get_locked(buf)) != NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) rpcrdma_rep_destroy(rep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) * rpcrdma_buffer_create - Create initial set of req/rep objects
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) * @r_xprt: transport instance to (re)initialize
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) * Returns zero on success, otherwise a negative errno.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) int rpcrdma_buffer_create(struct rpcrdma_xprt *r_xprt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) int i, rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) buf->rb_bc_srv_max_requests = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) spin_lock_init(&buf->rb_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) INIT_LIST_HEAD(&buf->rb_mrs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) INIT_LIST_HEAD(&buf->rb_all_mrs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) INIT_WORK(&buf->rb_refresh_worker, rpcrdma_mr_refresh_worker);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) INIT_LIST_HEAD(&buf->rb_send_bufs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) INIT_LIST_HEAD(&buf->rb_allreqs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) INIT_LIST_HEAD(&buf->rb_all_reps);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) rc = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) for (i = 0; i < r_xprt->rx_xprt.max_reqs; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) struct rpcrdma_req *req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) req = rpcrdma_req_create(r_xprt, RPCRDMA_V1_DEF_INLINE_SIZE * 2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) if (!req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) list_add(&req->rl_list, &buf->rb_send_bufs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) init_llist_head(&buf->rb_free_reps);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) rpcrdma_buffer_destroy(buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) * rpcrdma_req_destroy - Destroy an rpcrdma_req object
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) * @req: unused object to be destroyed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) * Relies on caller holding the transport send lock to protect
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) * removing req->rl_all from buf->rb_allreqs safely.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) void rpcrdma_req_destroy(struct rpcrdma_req *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) struct rpcrdma_mr *mr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) list_del(&req->rl_all);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) while ((mr = rpcrdma_mr_pop(&req->rl_free_mrs))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) struct rpcrdma_buffer *buf = &mr->mr_xprt->rx_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) spin_lock(&buf->rb_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) list_del(&mr->mr_all);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) spin_unlock(&buf->rb_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) frwr_release_mr(mr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) rpcrdma_regbuf_free(req->rl_recvbuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) rpcrdma_regbuf_free(req->rl_sendbuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) rpcrdma_regbuf_free(req->rl_rdmabuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) kfree(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) * rpcrdma_mrs_destroy - Release all of a transport's MRs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) * @r_xprt: controlling transport instance
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) * Relies on caller holding the transport send lock to protect
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) * removing mr->mr_list from req->rl_free_mrs safely.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) static void rpcrdma_mrs_destroy(struct rpcrdma_xprt *r_xprt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) struct rpcrdma_mr *mr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) cancel_work_sync(&buf->rb_refresh_worker);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) spin_lock(&buf->rb_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) while ((mr = list_first_entry_or_null(&buf->rb_all_mrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) struct rpcrdma_mr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) mr_all)) != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) list_del(&mr->mr_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) list_del(&mr->mr_all);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) spin_unlock(&buf->rb_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) frwr_release_mr(mr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) spin_lock(&buf->rb_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) spin_unlock(&buf->rb_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141)
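/* The loop above follows a common pattern for tearing down a spinlocked
 * list when the per-entry destructor may sleep (frwr_release_mr()
 * ultimately deregisters the MR with the device, assumed here to be a
 * blocking operation): detach one entry, drop the lock, destroy it, then
 * re-take the lock before examining the list again.  Generic shape:
 *
 *	spin_lock(&lock);
 *	while ((item = list_first_entry_or_null(&head, ...)) != NULL) {
 *		list_del(&item->node);
 *		spin_unlock(&lock);
 *		destroy(item);		// may sleep
 *		spin_lock(&lock);
 *	}
 *	spin_unlock(&lock);
 */
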
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) * rpcrdma_buffer_destroy - Release all hw resources
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) * @buf: root control block for resources
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) * ORDERING: relies on a prior rpcrdma_xprt_drain:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) * - No more Send or Receive completions can occur
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) * - All MRs, reps, and reqs are returned to their free lists
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) rpcrdma_buffer_destroy(struct rpcrdma_buffer *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) rpcrdma_reps_destroy(buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) while (!list_empty(&buf->rb_send_bufs)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) struct rpcrdma_req *req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) req = list_first_entry(&buf->rb_send_bufs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) struct rpcrdma_req, rl_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) list_del(&req->rl_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) rpcrdma_req_destroy(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) * rpcrdma_mr_get - Allocate an rpcrdma_mr object
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) * @r_xprt: controlling transport
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) * Returns an initialized rpcrdma_mr or NULL if no free
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) * rpcrdma_mr objects are available.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) struct rpcrdma_mr *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) rpcrdma_mr_get(struct rpcrdma_xprt *r_xprt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) struct rpcrdma_mr *mr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) spin_lock(&buf->rb_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) mr = rpcrdma_mr_pop(&buf->rb_mrs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) spin_unlock(&buf->rb_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) return mr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183)
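/* Hypothetical caller pattern (illustrative only, not a call site in this
 * file): a marshaling path that runs out of MRs can nudge the refresh
 * worker and ask the upper layer to retry once new MRs exist:
 *
 *	mr = rpcrdma_mr_get(r_xprt);
 *	if (!mr) {
 *		rpcrdma_mrs_refresh(r_xprt);
 *		return ERR_PTR(-EAGAIN);	// assumed retry convention
 *	}
 */
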
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) * rpcrdma_mr_put - DMA unmap an MR and release it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) * @mr: MR to release
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) void rpcrdma_mr_put(struct rpcrdma_mr *mr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) struct rpcrdma_xprt *r_xprt = mr->mr_xprt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) if (mr->mr_dir != DMA_NONE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) trace_xprtrdma_mr_unmap(mr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) ib_dma_unmap_sg(r_xprt->rx_ep->re_id->device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) mr->mr_sg, mr->mr_nents, mr->mr_dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) mr->mr_dir = DMA_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) rpcrdma_mr_push(mr, &mr->mr_req->rl_free_mrs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) * rpcrdma_reply_put - Put reply buffers back into pool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) * @buffers: buffer pool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) * @req: object to return
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) void rpcrdma_reply_put(struct rpcrdma_buffer *buffers, struct rpcrdma_req *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) if (req->rl_reply) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) rpcrdma_rep_put(buffers, req->rl_reply);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) req->rl_reply = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) * rpcrdma_buffer_get - Get a request buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) * @buffers: Buffer pool from which to obtain a buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) * Returns a fresh rpcrdma_req, or NULL if none are available.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) struct rpcrdma_req *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) rpcrdma_buffer_get(struct rpcrdma_buffer *buffers)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) struct rpcrdma_req *req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) spin_lock(&buffers->rb_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) req = list_first_entry_or_null(&buffers->rb_send_bufs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) struct rpcrdma_req, rl_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) if (req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) list_del_init(&req->rl_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) spin_unlock(&buffers->rb_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) return req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) * rpcrdma_buffer_put - Put request/reply buffers back into pool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) * @buffers: buffer pool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) * @req: object to return
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) void rpcrdma_buffer_put(struct rpcrdma_buffer *buffers, struct rpcrdma_req *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) rpcrdma_reply_put(buffers, req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) spin_lock(&buffers->rb_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) list_add(&req->rl_list, &buffers->rb_send_bufs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) spin_unlock(&buffers->rb_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) * rpcrdma_recv_buffer_put - Release rpcrdma_rep back to free list
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) * @rep: rep to release
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) * Used after error conditions.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) void rpcrdma_recv_buffer_put(struct rpcrdma_rep *rep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) rpcrdma_rep_put(&rep->rr_rxprt->rx_buf, rep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) /* Returns a pointer to an rpcrdma_regbuf object, or NULL.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) * xprtrdma uses a regbuf for posting an outgoing RDMA SEND, or for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) * receiving the payload of RDMA RECV operations. During Long Calls
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) * or Replies they may be registered externally via frwr_map.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) static struct rpcrdma_regbuf *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) rpcrdma_regbuf_alloc(size_t size, enum dma_data_direction direction,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) gfp_t flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) struct rpcrdma_regbuf *rb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) rb = kmalloc(sizeof(*rb), flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) if (!rb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) rb->rg_data = kmalloc(size, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) if (!rb->rg_data) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) kfree(rb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) rb->rg_device = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) rb->rg_direction = direction;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) rb->rg_iov.length = size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) return rb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) * rpcrdma_regbuf_realloc - re-allocate a SEND/RECV buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) * @rb: regbuf to reallocate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) * @size: size of buffer to be allocated, in bytes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) * @flags: GFP flags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) * Returns true if reallocation was successful. If false is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) * returned, @rb is left untouched.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) bool rpcrdma_regbuf_realloc(struct rpcrdma_regbuf *rb, size_t size, gfp_t flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) void *buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) buf = kmalloc(size, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) if (!buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) rpcrdma_regbuf_dma_unmap(rb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) kfree(rb->rg_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) rb->rg_data = buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) rb->rg_iov.length = size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314)
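/* Hypothetical usage sketch (not a call site in this file): a send path
 * that finds its pre-allocated sendbuf too small for the current RPC can
 * grow it in place, failing cleanly when memory is tight ("newsize" is an
 * assumed local variable):
 *
 *	if (rdmab_length(req->rl_sendbuf) < newsize &&
 *	    !rpcrdma_regbuf_realloc(req->rl_sendbuf, newsize, GFP_KERNEL))
 *		return -ENOMEM;
 *
 * On failure the old buffer and its DMA mapping are left untouched, so
 * the caller can still complete or fail the request normally.
 */
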
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) * __rpcrdma_regbuf_dma_map - DMA-map a regbuf
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) * @r_xprt: controlling transport instance
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) * @rb: regbuf to be mapped
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) * Returns true if the buffer is now DMA mapped to @r_xprt's device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) bool __rpcrdma_regbuf_dma_map(struct rpcrdma_xprt *r_xprt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) struct rpcrdma_regbuf *rb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) struct ib_device *device = r_xprt->rx_ep->re_id->device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) if (rb->rg_direction == DMA_NONE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) rb->rg_iov.addr = ib_dma_map_single(device, rdmab_data(rb),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) rdmab_length(rb), rb->rg_direction);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) if (ib_dma_mapping_error(device, rdmab_addr(rb))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) trace_xprtrdma_dma_maperr(rdmab_addr(rb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) rb->rg_device = device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) rb->rg_iov.lkey = r_xprt->rx_ep->re_pd->local_dma_lkey;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341)
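/* The double-underscore prefix follows the usual kernel convention: a
 * rpcrdma_regbuf_dma_map() wrapper (declared in xprt_rdma.h, used above in
 * rpcrdma_rep_create()) is expected to short-circuit when the buffer is
 * already mapped and fall into this slow path only on first use.  A sketch
 * of that assumed wrapper:
 *
 *	static inline bool
 *	rpcrdma_regbuf_dma_map(struct rpcrdma_xprt *r_xprt,
 *			       struct rpcrdma_regbuf *rb)
 *	{
 *		if (likely(rpcrdma_regbuf_is_mapped(rb)))
 *			return true;
 *		return __rpcrdma_regbuf_dma_map(r_xprt, rb);
 *	}
 *
 * The wrapper's exact form is an assumption; only the mapped-check helper
 * rpcrdma_regbuf_is_mapped() is visible in this file.
 */
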
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) static void rpcrdma_regbuf_dma_unmap(struct rpcrdma_regbuf *rb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) if (!rb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) if (!rpcrdma_regbuf_is_mapped(rb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) ib_dma_unmap_single(rb->rg_device, rdmab_addr(rb), rdmab_length(rb),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) rb->rg_direction);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) rb->rg_device = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) static void rpcrdma_regbuf_free(struct rpcrdma_regbuf *rb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) rpcrdma_regbuf_dma_unmap(rb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) if (rb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) kfree(rb->rg_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) kfree(rb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) * rpcrdma_post_sends - Post WRs to a transport's Send Queue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) * @r_xprt: controlling transport instance
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) * @req: rpcrdma_req containing the Send WR to post
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) * Returns 0 if the post was successful, otherwise -ENOTCONN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) * is returned.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) int rpcrdma_post_sends(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) struct ib_send_wr *send_wr = &req->rl_wr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) struct rpcrdma_ep *ep = r_xprt->rx_ep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) if (!ep->re_send_count || kref_read(&req->rl_kref) > 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) send_wr->send_flags |= IB_SEND_SIGNALED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) ep->re_send_count = ep->re_send_batch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) send_wr->send_flags &= ~IB_SEND_SIGNALED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) --ep->re_send_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) trace_xprtrdma_post_send(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) rc = frwr_send(r_xprt, req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) return -ENOTCONN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391)
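/* Worked example of the signaling policy above, with an illustrative
 * batch size (the real value comes from ep->re_send_batch): if
 * re_send_batch is 16, only every 16th Send WR carries IB_SEND_SIGNALED
 * and generates a Send completion.  That completion invokes
 * rpcrdma_sendctx_put_locked(), whose loop (see above) then unmaps the
 * SGEs of the 15 unsignaled Sends that preceded it.  A Send is also
 * forced to be signaled when kref_read(&req->rl_kref) > 1, i.e. when the
 * req is still referenced elsewhere and its completion must not be
 * skipped.
 */
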
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) * rpcrdma_post_recvs - Refill the Receive Queue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) * @r_xprt: controlling transport instance
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) * @needed: current credit grant
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) * @temp: mark Receive buffers to be deleted after one use
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) void rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, int needed, bool temp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) struct rpcrdma_ep *ep = r_xprt->rx_ep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) struct ib_recv_wr *wr, *bad_wr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) struct rpcrdma_rep *rep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) int count, rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) if (likely(ep->re_receive_count > needed))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) needed -= ep->re_receive_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) if (!temp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) needed += RPCRDMA_MAX_RECV_BATCH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) /* fast path: all needed reps can be found on the free list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) wr = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) while (needed) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) rep = rpcrdma_rep_get_locked(buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) if (rep && rep->rr_temp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) rpcrdma_rep_destroy(rep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) if (!rep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) rep = rpcrdma_rep_create(r_xprt, temp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) if (!rep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) trace_xprtrdma_post_recv(rep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) rep->rr_recv_wr.next = wr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) wr = &rep->rr_recv_wr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) --needed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) ++count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) if (!wr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) rc = ib_post_recv(ep->re_id->qp, wr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) (const struct ib_recv_wr **)&bad_wr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) trace_xprtrdma_post_recvs(r_xprt, count, rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) for (wr = bad_wr; wr;) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) struct rpcrdma_rep *rep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) rep = container_of(wr, struct rpcrdma_rep, rr_recv_wr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) wr = wr->next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) rpcrdma_recv_buffer_put(rep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) --count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) ep->re_receive_count += count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) }
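
/* Worked example of the refill arithmetic above, with illustrative
 * numbers: if the peer grants 32 credits (needed = 32) while
 * ep->re_receive_count is 10, the fast-path check fails and needed
 * becomes 32 - 10 = 22; for a long-lived connection (!temp), up to
 * RPCRDMA_MAX_RECV_BATCH extra Receives are added on top so the queue
 * does not have to be topped up on every reply.  Each WR built in the
 * loop bumps count, ib_post_recv() posts the whole chain at once, and
 * any WRs it rejects are walked via bad_wr, returned to the free list,
 * and subtracted from count before re_receive_count is updated.
 */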