Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2015, 2017 Oracle.  All rights reserved.
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 */

/* Lightweight memory registration using Fast Registration Work
 * Requests (FRWR).
 *
 * FRWR features ordered asynchronous registration and invalidation
 * of arbitrarily-sized memory regions. This is the fastest and safest
 * but most complex memory registration mode.
 */

/* Normal operation
 *
 * A Memory Region is prepared for RDMA Read or Write using a FAST_REG
 * Work Request (frwr_map). When the RDMA operation is finished, this
 * Memory Region is invalidated using a LOCAL_INV Work Request
 * (frwr_unmap_async and frwr_unmap_sync).
 *
 * Typically FAST_REG Work Requests are not signaled, and neither are
 * RDMA Send Work Requests (with the exception of signaling occasionally
 * to prevent provider work queue overflows). This greatly reduces HCA
 * interrupt workload.
 */

/* Transport recovery
 *
 * frwr_map and frwr_unmap_* cannot run at the same time the transport
 * connect worker is running. The connect worker holds the transport
 * send lock, just as ->send_request does. This prevents frwr_map and
 * the connect worker from running concurrently. When a connection is
 * closed, the Receive completion queue is drained before allowing
 * the connect worker to get control. This prevents frwr_unmap and the
 * connect worker from running concurrently.
 *
 * When the underlying transport disconnects, MRs that are in flight
 * are flushed and are likely unusable. Thus all MRs are destroyed.
 * New MRs are created on demand.
 */

#include <linux/sunrpc/svc_rdma.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

/**
 * frwr_release_mr - Destroy one MR
 * @mr: MR allocated by frwr_mr_init
 *
 */
void frwr_release_mr(struct rpcrdma_mr *mr)
{
	int rc;

	rc = ib_dereg_mr(mr->frwr.fr_mr);
	if (rc)
		trace_xprtrdma_frwr_dereg(mr, rc);
	kfree(mr->mr_sg);
	kfree(mr);
}

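/* frwr_mr_recycle - Retire an MR that can no longer be trusted
 * @mr: MR to recycle
 *
 * DMA-unmaps the MR's scatterlist if it is still mapped, removes the
 * MR from the transport's list of all MRs (mr_all, under rb_lock),
 * bumps the mrs_recycled counter, and destroys the MR. Per the
 * "Transport recovery" note above, a replacement MR is created on
 * demand.
 */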
static void frwr_mr_recycle(struct rpcrdma_mr *mr)
{
	struct rpcrdma_xprt *r_xprt = mr->mr_xprt;

	trace_xprtrdma_mr_recycle(mr);

	if (mr->mr_dir != DMA_NONE) {
		trace_xprtrdma_mr_unmap(mr);
		ib_dma_unmap_sg(r_xprt->rx_ep->re_id->device,
				mr->mr_sg, mr->mr_nents, mr->mr_dir);
		mr->mr_dir = DMA_NONE;
	}

	spin_lock(&r_xprt->rx_buf.rb_lock);
	list_del(&mr->mr_all);
	r_xprt->rx_stats.mrs_recycled++;
	spin_unlock(&r_xprt->rx_buf.rb_lock);

	frwr_release_mr(mr);
}

/* frwr_reset - Place MRs back on the free list
 * @req: request to reset
 *
 * Used after a failed marshal. For FRWR, this means the MRs
 * don't have to be fully released and recreated.
 *
 * NB: This is safe only as long as none of @req's MRs are
 * involved with an ongoing asynchronous FAST_REG or LOCAL_INV
 * Work Request.
 */
void frwr_reset(struct rpcrdma_req *req)
{
	struct rpcrdma_mr *mr;

	while ((mr = rpcrdma_mr_pop(&req->rl_registered)))
		rpcrdma_mr_put(mr);
}

/**
 * frwr_mr_init - Initialize one MR
 * @r_xprt: controlling transport instance
 * @mr: generic MR to prepare for FRWR
 *
 * Returns zero if successful. Otherwise a negative errno
 * is returned.
 */
int frwr_mr_init(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr *mr)
{
	struct rpcrdma_ep *ep = r_xprt->rx_ep;
	unsigned int depth = ep->re_max_fr_depth;
	struct scatterlist *sg;
	struct ib_mr *frmr;
	int rc;

	frmr = ib_alloc_mr(ep->re_pd, ep->re_mrtype, depth);
	if (IS_ERR(frmr))
		goto out_mr_err;

	sg = kmalloc_array(depth, sizeof(*sg), GFP_NOFS);
	if (!sg)
		goto out_list_err;

	mr->mr_xprt = r_xprt;
	mr->frwr.fr_mr = frmr;
	mr->mr_dir = DMA_NONE;
	INIT_LIST_HEAD(&mr->mr_list);
	init_completion(&mr->frwr.fr_linv_done);

	sg_init_table(sg, depth);
	mr->mr_sg = sg;
	return 0;

out_mr_err:
	rc = PTR_ERR(frmr);
	trace_xprtrdma_frwr_alloc(mr, rc);
	return rc;

out_list_err:
	ib_dereg_mr(frmr);
	return -ENOMEM;
}

/**
 * frwr_query_device - Prepare a transport for use with FRWR
 * @ep: endpoint to fill in
 * @device: RDMA device to query
 *
 * On success, sets:
 *	ep->re_attr
 *	ep->re_max_requests
 *	ep->re_max_rdma_segs
 *	ep->re_max_fr_depth
 *	ep->re_mrtype
 *
 * Return values:
 *   On success, returns zero.
 *   %-EINVAL - the device does not support FRWR memory registration
 *   %-ENOMEM - the device is not sufficiently capable for NFS/RDMA
 */
int frwr_query_device(struct rpcrdma_ep *ep, const struct ib_device *device)
{
	const struct ib_device_attr *attrs = &device->attrs;
	int max_qp_wr, depth, delta;
	unsigned int max_sge;

	if (!(attrs->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) ||
	    attrs->max_fast_reg_page_list_len == 0) {
		pr_err("rpcrdma: 'frwr' mode is not supported by device %s\n",
		       device->name);
		return -EINVAL;
	}

	max_sge = min_t(unsigned int, attrs->max_send_sge,
			RPCRDMA_MAX_SEND_SGES);
	if (max_sge < RPCRDMA_MIN_SEND_SGES) {
		pr_err("rpcrdma: HCA provides only %u send SGEs\n", max_sge);
		return -ENOMEM;
	}
	ep->re_attr.cap.max_send_sge = max_sge;
	ep->re_attr.cap.max_recv_sge = 1;

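	/* Prefer SG_GAPS MRs when the device offers them: they can
	 * register scatterlists whose elements need not be page-aligned,
	 * so frwr_map (below) does not have to stop gathering segments
	 * at page-boundary gaps.
	 */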
	ep->re_mrtype = IB_MR_TYPE_MEM_REG;
	if (attrs->device_cap_flags & IB_DEVICE_SG_GAPS_REG)
		ep->re_mrtype = IB_MR_TYPE_SG_GAPS;

	/* Quirk: Some devices advertise a large max_fast_reg_page_list_len
	 * capability, but perform optimally when the MRs are not larger
	 * than a page.
	 */
	if (attrs->max_sge_rd > RPCRDMA_MAX_HDR_SEGS)
		ep->re_max_fr_depth = attrs->max_sge_rd;
	else
		ep->re_max_fr_depth = attrs->max_fast_reg_page_list_len;
	if (ep->re_max_fr_depth > RPCRDMA_MAX_DATA_SEGS)
		ep->re_max_fr_depth = RPCRDMA_MAX_DATA_SEGS;

	/* Add room for frwr register and invalidate WRs.
	 * 1. FRWR reg WR for head
	 * 2. FRWR invalidate WR for head
	 * 3. N FRWR reg WRs for pagelist
	 * 4. N FRWR invalidate WRs for pagelist
	 * 5. FRWR reg WR for tail
	 * 6. FRWR invalidate WR for tail
	 * 7. The RDMA_SEND WR
	 */
	depth = 7;

	/* Calculate N if the device max FRWR depth is smaller than
	 * RPCRDMA_MAX_DATA_SEGS.
	 */
	if (ep->re_max_fr_depth < RPCRDMA_MAX_DATA_SEGS) {
		delta = RPCRDMA_MAX_DATA_SEGS - ep->re_max_fr_depth;
		do {
			depth += 2; /* FRWR reg + invalidate */
			delta -= ep->re_max_fr_depth;
		} while (delta > 0);
	}

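	/* Size the Send and Receive Queues, leaving headroom for
	 * backchannel WRs and for the sentinel WRs posted by
	 * ib_drain_sq()/ib_drain_rq(). If the device's max_qp_wr cannot
	 * accommodate "depth" Send Queue entries per credit, scale
	 * re_max_requests down to fit.
	 */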
	max_qp_wr = attrs->max_qp_wr;
	max_qp_wr -= RPCRDMA_BACKWARD_WRS;
	max_qp_wr -= 1;
	if (max_qp_wr < RPCRDMA_MIN_SLOT_TABLE)
		return -ENOMEM;
	if (ep->re_max_requests > max_qp_wr)
		ep->re_max_requests = max_qp_wr;
	ep->re_attr.cap.max_send_wr = ep->re_max_requests * depth;
	if (ep->re_attr.cap.max_send_wr > max_qp_wr) {
		ep->re_max_requests = max_qp_wr / depth;
		if (!ep->re_max_requests)
			return -ENOMEM;
		ep->re_attr.cap.max_send_wr = ep->re_max_requests * depth;
	}
	ep->re_attr.cap.max_send_wr += RPCRDMA_BACKWARD_WRS;
	ep->re_attr.cap.max_send_wr += 1; /* for ib_drain_sq */
	ep->re_attr.cap.max_recv_wr = ep->re_max_requests;
	ep->re_attr.cap.max_recv_wr += RPCRDMA_BACKWARD_WRS;
	ep->re_attr.cap.max_recv_wr += RPCRDMA_MAX_RECV_BATCH;
	ep->re_attr.cap.max_recv_wr += 1; /* for ib_drain_rq */

	ep->re_max_rdma_segs =
		DIV_ROUND_UP(RPCRDMA_MAX_DATA_SEGS, ep->re_max_fr_depth);
	/* Reply chunks require segments for head and tail buffers */
	ep->re_max_rdma_segs += 2;
	if (ep->re_max_rdma_segs > RPCRDMA_MAX_HDR_SEGS)
		ep->re_max_rdma_segs = RPCRDMA_MAX_HDR_SEGS;

	/* Ensure the underlying device is capable of conveying the
	 * largest r/wsize NFS will ask for. This guarantees that
	 * failing over from one RDMA device to another will not
	 * break NFS I/O.
	 */
	if ((ep->re_max_rdma_segs * ep->re_max_fr_depth) < RPCRDMA_MAX_SEGS)
		return -ENOMEM;

	return 0;
}

/**
 * frwr_map - Register a memory region
 * @r_xprt: controlling transport
 * @seg: memory region co-ordinates
 * @nsegs: number of segments remaining
 * @writing: true when RDMA Write will be used
 * @xid: XID of RPC using the registered memory
 * @mr: MR to fill in
 *
 * Prepare a REG_MR Work Request to register a memory region
 * for remote access via RDMA READ or RDMA WRITE.
 *
 * Returns the next segment or a negative errno pointer.
 * On success, @mr is filled in.
 */
struct rpcrdma_mr_seg *frwr_map(struct rpcrdma_xprt *r_xprt,
				struct rpcrdma_mr_seg *seg,
				int nsegs, bool writing, __be32 xid,
				struct rpcrdma_mr *mr)
{
	struct rpcrdma_ep *ep = r_xprt->rx_ep;
	struct ib_reg_wr *reg_wr;
	int i, n, dma_nents;
	struct ib_mr *ibmr;
	u8 key;

	if (nsegs > ep->re_max_fr_depth)
		nsegs = ep->re_max_fr_depth;
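	/* Gather up to re_max_fr_depth segments into mr->mr_sg. Unless
	 * the device supports SG_GAPS registration, stop early when the
	 * scatterlist would contain a gap: a standard fast-reg MR is
	 * built from a page list, so a segment that does not end on a
	 * page boundary, or a following segment that does not start on
	 * one, ends the gathering.
	 */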
	for (i = 0; i < nsegs;) {
		if (seg->mr_page)
			sg_set_page(&mr->mr_sg[i],
				    seg->mr_page,
				    seg->mr_len,
				    offset_in_page(seg->mr_offset));
		else
			sg_set_buf(&mr->mr_sg[i], seg->mr_offset,
				   seg->mr_len);

		++seg;
		++i;
		if (ep->re_mrtype == IB_MR_TYPE_SG_GAPS)
			continue;
		if ((i < nsegs && offset_in_page(seg->mr_offset)) ||
		    offset_in_page((seg-1)->mr_offset + (seg-1)->mr_len))
			break;
	}
	mr->mr_dir = rpcrdma_data_dir(writing);
	mr->mr_nents = i;

	dma_nents = ib_dma_map_sg(ep->re_id->device, mr->mr_sg, mr->mr_nents,
				  mr->mr_dir);
	if (!dma_nents)
		goto out_dmamap_err;

	ibmr = mr->frwr.fr_mr;
	n = ib_map_mr_sg(ibmr, mr->mr_sg, dma_nents, NULL, PAGE_SIZE);
	if (n != dma_nents)
		goto out_mapmr_err;

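	/* Fold the RPC's XID into the upper 32 bits of the MR's iova
	 * (the offset that appears in the RDMA segment on the wire),
	 * which makes it easier to match captured RDMA traffic to an
	 * RPC. Bumping the low byte of the rkey gives each registration
	 * of this MR a fresh key.
	 */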
	ibmr->iova &= 0x00000000ffffffff;
	ibmr->iova |= ((u64)be32_to_cpu(xid)) << 32;
	key = (u8)(ibmr->rkey & 0x000000FF);
	ib_update_fast_reg_key(ibmr, ++key);

	reg_wr = &mr->frwr.fr_regwr;
	reg_wr->mr = ibmr;
	reg_wr->key = ibmr->rkey;
	reg_wr->access = writing ?
			 IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE :
			 IB_ACCESS_REMOTE_READ;

	mr->mr_handle = ibmr->rkey;
	mr->mr_length = ibmr->length;
	mr->mr_offset = ibmr->iova;
	trace_xprtrdma_mr_map(mr);

	return seg;

out_dmamap_err:
	mr->mr_dir = DMA_NONE;
	trace_xprtrdma_frwr_sgerr(mr, i);
	return ERR_PTR(-EIO);

out_mapmr_err:
	trace_xprtrdma_frwr_maperr(mr, n);
	return ERR_PTR(-EIO);
}

/**
 * frwr_wc_fastreg - Invoked by RDMA provider for a flushed FastReg WC
 * @cq: completion queue
 * @wc: WCE for a completed FastReg WR
 *
 */
static void frwr_wc_fastreg(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct rpcrdma_frwr *frwr =
		container_of(cqe, struct rpcrdma_frwr, fr_cqe);

	/* WARNING: Only wr_cqe and status are reliable at this point */
	trace_xprtrdma_wc_fastreg(wc, frwr);
	/* The MR will get recycled when the associated req is retransmitted */

	rpcrdma_flush_disconnect(cq->cq_context, wc);
}

/**
 * frwr_send - post Send WRs containing the RPC Call message
 * @r_xprt: controlling transport instance
 * @req: prepared RPC Call
 *
 * For FRWR, chain any FastReg WRs to the Send WR. Only a
 * single ib_post_send call is needed to register memory
 * and then post the Send WR.
 *
 * Returns the return code from ib_post_send.
 *
 * Caller must hold the transport send lock to ensure that the
 * pointers to the transport's rdma_cm_id and QP are stable.
 */
int frwr_send(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
{
	struct ib_send_wr *post_wr;
	struct rpcrdma_mr *mr;

	post_wr = &req->rl_wr;
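	/* Prepend each MR's REG_MR WR to the chain so that every
	 * registration WR precedes the Send WR carrying the RPC Call;
	 * the HCA executes the chain in order, registering all memory
	 * before the Call is sent.
	 */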
	list_for_each_entry(mr, &req->rl_registered, mr_list) {
		struct rpcrdma_frwr *frwr;

		frwr = &mr->frwr;

		frwr->fr_cqe.done = frwr_wc_fastreg;
		frwr->fr_regwr.wr.next = post_wr;
		frwr->fr_regwr.wr.wr_cqe = &frwr->fr_cqe;
		frwr->fr_regwr.wr.num_sge = 0;
		frwr->fr_regwr.wr.opcode = IB_WR_REG_MR;
		frwr->fr_regwr.wr.send_flags = 0;

		post_wr = &frwr->fr_regwr.wr;
	}

	return ib_post_send(r_xprt->rx_ep->re_id->qp, post_wr, NULL);
}

/**
 * frwr_reminv - handle a remotely invalidated mr on the @mrs list
 * @rep: Received reply
 * @mrs: list of MRs to check
 *
 */
void frwr_reminv(struct rpcrdma_rep *rep, struct list_head *mrs)
{
	struct rpcrdma_mr *mr;

	list_for_each_entry(mr, mrs, mr_list)
		if (mr->mr_handle == rep->rr_inv_rkey) {
			list_del_init(&mr->mr_list);
			trace_xprtrdma_mr_reminv(mr);
			rpcrdma_mr_put(mr);
			break;	/* only one invalidated MR per RPC */
		}
}

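/* __frwr_release_mr - Release an MR after its LocalInv has completed
 * @wc: completion for the LOCAL_INV Work Request
 * @mr: MR that was being invalidated
 *
 * On success the MR is known to be invalid and goes back on the free
 * list. On a flushed or failed completion its registration state is
 * uncertain, so the MR is recycled (destroyed and later replaced on
 * demand) instead.
 */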
static void __frwr_release_mr(struct ib_wc *wc, struct rpcrdma_mr *mr)
{
	if (wc->status != IB_WC_SUCCESS)
		frwr_mr_recycle(mr);
	else
		rpcrdma_mr_put(mr);
}

/**
 * frwr_wc_localinv - Invoked by RDMA provider for a LOCAL_INV WC
 * @cq: completion queue
 * @wc: WCE for a completed LocalInv WR
 *
 */
static void frwr_wc_localinv(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct rpcrdma_frwr *frwr =
		container_of(cqe, struct rpcrdma_frwr, fr_cqe);
	struct rpcrdma_mr *mr = container_of(frwr, struct rpcrdma_mr, frwr);

	/* WARNING: Only wr_cqe and status are reliable at this point */
	trace_xprtrdma_wc_li(wc, frwr);
	__frwr_release_mr(wc, mr);

	rpcrdma_flush_disconnect(cq->cq_context, wc);
}

/**
 * frwr_wc_localinv_wake - Invoked by RDMA provider for a LOCAL_INV WC
 * @cq: completion queue
 * @wc: WCE for a completed LocalInv WR
 *
 * Awaken anyone waiting for an MR to finish being fenced.
 */
static void frwr_wc_localinv_wake(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct rpcrdma_frwr *frwr =
		container_of(cqe, struct rpcrdma_frwr, fr_cqe);
	struct rpcrdma_mr *mr = container_of(frwr, struct rpcrdma_mr, frwr);

	/* WARNING: Only wr_cqe and status are reliable at this point */
	trace_xprtrdma_wc_li_wake(wc, frwr);
	__frwr_release_mr(wc, mr);
	complete(&frwr->fr_linv_done);

	rpcrdma_flush_disconnect(cq->cq_context, wc);
}

/**
 * frwr_unmap_sync - invalidate memory regions that were registered for @req
 * @r_xprt: controlling transport instance
 * @req: rpcrdma_req with a non-empty list of MRs to process
 *
 * Sleeps until it is safe for the host CPU to access the previously mapped
 * memory regions. This guarantees that registered MRs are properly fenced
 * from the server before the RPC consumer accesses the data in them. It
 * also ensures proper Send flow control: waking the next RPC waits until
 * this RPC has relinquished all its Send Queue entries.
 */
void frwr_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
{
	struct ib_send_wr *first, **prev, *last;
	const struct ib_send_wr *bad_wr;
	struct rpcrdma_frwr *frwr;
	struct rpcrdma_mr *mr;
	int rc;

	/* ORDER: Invalidate all of the MRs first
	 *
	 * Chain the LOCAL_INV Work Requests and post them with
	 * a single ib_post_send() call.
	 */
	frwr = NULL;
	prev = &first;
	while ((mr = rpcrdma_mr_pop(&req->rl_registered))) {

		trace_xprtrdma_mr_localinv(mr);
		r_xprt->rx_stats.local_inv_needed++;

		frwr = &mr->frwr;
		frwr->fr_cqe.done = frwr_wc_localinv;
		last = &frwr->fr_invwr;
		last->next = NULL;
		last->wr_cqe = &frwr->fr_cqe;
		last->sg_list = NULL;
		last->num_sge = 0;
		last->opcode = IB_WR_LOCAL_INV;
		last->send_flags = IB_SEND_SIGNALED;
		last->ex.invalidate_rkey = mr->mr_handle;

		*prev = last;
		prev = &last->next;
	}

	/* Strong send queue ordering guarantees that when the
	 * last WR in the chain completes, all WRs in the chain
	 * are complete.
	 */
	frwr->fr_cqe.done = frwr_wc_localinv_wake;
	reinit_completion(&frwr->fr_linv_done);

	/* Transport disconnect drains the receive CQ before it
	 * replaces the QP. The RPC reply handler won't call us
	 * unless re_id->qp is a valid pointer.
	 */
	bad_wr = NULL;
	rc = ib_post_send(r_xprt->rx_ep->re_id->qp, first, &bad_wr);

	/* The final LOCAL_INV WR in the chain is supposed to
	 * do the wake. If it was never posted, the wake will
	 * not happen, so don't wait in that case.
	 */
	if (bad_wr != first)
		wait_for_completion(&frwr->fr_linv_done);
	if (!rc)
		return;

	/* Recycle MRs in the LOCAL_INV chain that did not get posted.
	 */
	trace_xprtrdma_post_linv(req, rc);
	while (bad_wr) {
		frwr = container_of(bad_wr, struct rpcrdma_frwr,
				    fr_invwr);
		mr = container_of(frwr, struct rpcrdma_mr, frwr);
		bad_wr = bad_wr->next;

		frwr_mr_recycle(mr);
	}
}

/**
 * frwr_wc_localinv_done - Invoked by RDMA provider for a signaled LOCAL_INV WC
 * @cq:	completion queue
 * @wc:	WCE for a completed LocalInv WR
 *
 */
static void frwr_wc_localinv_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct rpcrdma_frwr *frwr =
		container_of(cqe, struct rpcrdma_frwr, fr_cqe);
	struct rpcrdma_mr *mr = container_of(frwr, struct rpcrdma_mr, frwr);
	struct rpcrdma_rep *rep = mr->mr_req->rl_reply;

	/* WARNING: Only wr_cqe and status are reliable at this point */
	trace_xprtrdma_wc_li_done(wc, frwr);
	__frwr_release_mr(wc, mr);

	/* Ensure @rep is generated before __frwr_release_mr */
	smp_rmb();
	rpcrdma_complete_rqst(rep);

	rpcrdma_flush_disconnect(cq->cq_context, wc);
}

/**
 * frwr_unmap_async - invalidate memory regions that were registered for @req
 * @r_xprt: controlling transport instance
 * @req: rpcrdma_req with a non-empty list of MRs to process
 *
 * This guarantees that registered MRs are properly fenced from the
 * server before the RPC consumer accesses the data in them. It also
 * ensures proper Send flow control: waking the next RPC waits until
 * this RPC has relinquished all its Send Queue entries.
 */
void frwr_unmap_async(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
{
	struct ib_send_wr *first, *last, **prev;
	const struct ib_send_wr *bad_wr;
	struct rpcrdma_frwr *frwr;
	struct rpcrdma_mr *mr;
	int rc;

	/* Chain the LOCAL_INV Work Requests and post them with
	 * a single ib_post_send() call.
	 */
	frwr = NULL;
	prev = &first;
	while ((mr = rpcrdma_mr_pop(&req->rl_registered))) {

		trace_xprtrdma_mr_localinv(mr);
		r_xprt->rx_stats.local_inv_needed++;

		frwr = &mr->frwr;
		frwr->fr_cqe.done = frwr_wc_localinv;
		last = &frwr->fr_invwr;
		last->next = NULL;
		last->wr_cqe = &frwr->fr_cqe;
		last->sg_list = NULL;
		last->num_sge = 0;
		last->opcode = IB_WR_LOCAL_INV;
		last->send_flags = IB_SEND_SIGNALED;
		last->ex.invalidate_rkey = mr->mr_handle;

		*prev = last;
		prev = &last->next;
	}

	/* Strong send queue ordering guarantees that when the
	 * last WR in the chain completes, all WRs in the chain
	 * are complete. The last completion will wake up the
	 * RPC waiter.
	 */
	frwr->fr_cqe.done = frwr_wc_localinv_done;

	/* Transport disconnect drains the receive CQ before it
	 * replaces the QP. The RPC reply handler won't call us
	 * unless re_id->qp is a valid pointer.
	 */
	bad_wr = NULL;
	rc = ib_post_send(r_xprt->rx_ep->re_id->qp, first, &bad_wr);
	if (!rc)
		return;

	/* Recycle MRs in the LOCAL_INV chain that did not get posted.
	 */
	trace_xprtrdma_post_linv(req, rc);
	while (bad_wr) {
		frwr = container_of(bad_wr, struct rpcrdma_frwr, fr_invwr);
		mr = container_of(frwr, struct rpcrdma_mr, frwr);
		bad_wr = bad_wr->next;

		frwr_mr_recycle(mr);
	}

	/* The final LOCAL_INV WR in the chain is supposed to
	 * do the wake. If it was never posted, the wake will
	 * not happen, so wake here in that case.
	 */
	rpcrdma_complete_rqst(req->rl_reply);
}