Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 sources for the OrangePi 5/5B/5+ boards

/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
 * Copyright (c) 2014-2017 Oracle.  All rights reserved.
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _LINUX_SUNRPC_XPRT_RDMA_H
#define _LINUX_SUNRPC_XPRT_RDMA_H

#include <linux/wait.h>			/* wait_queue_head_t, etc */
#include <linux/spinlock.h>		/* spinlock_t, etc */
#include <linux/atomic.h>		/* atomic_t, etc */
#include <linux/kref.h>			/* struct kref */
#include <linux/workqueue.h>		/* struct work_struct */
#include <linux/llist.h>

#include <rdma/rdma_cm.h>		/* RDMA connection api */
#include <rdma/ib_verbs.h>		/* RDMA verbs api */

#include <linux/sunrpc/clnt.h>		/* rpc_xprt */
#include <linux/sunrpc/rpc_rdma.h>	/* RPC/RDMA protocol */
#include <linux/sunrpc/xprtrdma.h>	/* xprt parameters */

#define RDMA_RESOLVE_TIMEOUT	(5000)	/* 5 seconds */
#define RDMA_CONNECT_RETRY_MAX	(2)	/* retries if no listener backlog */

#define RPCRDMA_BIND_TO		(60U * HZ)
#define RPCRDMA_INIT_REEST_TO	(5U * HZ)
#define RPCRDMA_MAX_REEST_TO	(30U * HZ)
#define RPCRDMA_IDLE_DISC_TO	(5U * 60 * HZ)

/*
 * RDMA Endpoint -- connection endpoint details
 */
struct rpcrdma_ep {
	struct kref		re_kref;
	struct rdma_cm_id	*re_id;
	struct ib_pd		*re_pd;
	unsigned int		re_max_rdma_segs;
	unsigned int		re_max_fr_depth;
	bool			re_implicit_roundup;
	enum ib_mr_type		re_mrtype;
	struct completion	re_done;
	unsigned int		re_send_count;
	unsigned int		re_send_batch;
	unsigned int		re_max_inline_send;
	unsigned int		re_max_inline_recv;
	int			re_async_rc;
	int			re_connect_status;
	atomic_t		re_force_disconnect;
	struct ib_qp_init_attr	re_attr;
	wait_queue_head_t       re_connect_wait;
	struct rpc_xprt		*re_xprt;
	struct rpcrdma_connect_private
				re_cm_private;
	struct rdma_conn_param	re_remote_cma;
	int			re_receive_count;
	unsigned int		re_max_requests; /* depends on device */
	unsigned int		re_inline_send;	/* negotiated */
	unsigned int		re_inline_recv;	/* negotiated */
};

/* Pre-allocate extra Work Requests for handling backward receives
 * and sends. This is a fixed value because the Work Queues are
 * allocated when the forward channel is set up, long before the
 * backchannel is provisioned. This value is two times
 * NFS4_DEF_CB_SLOT_TABLE_SIZE.
 */
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
#define RPCRDMA_BACKWARD_WRS (32)
#else
#define RPCRDMA_BACKWARD_WRS (0)
#endif
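
/* Illustrative arithmetic (not part of the original header): assuming
 * NFS4_DEF_CB_SLOT_TABLE_SIZE is 16, as it is defined elsewhere in this
 * kernel series, the fixed value above follows as 2 * 16 = 32 extra
 * Work Requests reserved for backchannel traffic.
 */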

/* Registered buffer -- registered kmalloc'd memory for RDMA SEND/RECV
 */

struct rpcrdma_regbuf {
	struct ib_sge		rg_iov;
	struct ib_device	*rg_device;
	enum dma_data_direction	rg_direction;
	void			*rg_data;
};

static inline u64 rdmab_addr(struct rpcrdma_regbuf *rb)
{
	return rb->rg_iov.addr;
}

static inline u32 rdmab_length(struct rpcrdma_regbuf *rb)
{
	return rb->rg_iov.length;
}

static inline u32 rdmab_lkey(struct rpcrdma_regbuf *rb)
{
	return rb->rg_iov.lkey;
}

static inline struct ib_device *rdmab_device(struct rpcrdma_regbuf *rb)
{
	return rb->rg_device;
}

static inline void *rdmab_data(const struct rpcrdma_regbuf *rb)
{
	return rb->rg_data;
}

#define RPCRDMA_DEF_GFP		(GFP_NOIO | __GFP_NOWARN)

/* To ensure a transport can always make forward progress,
 * the number of RDMA segments allowed in header chunk lists
 * is capped at 16. This prevents less-capable devices from
 * overrunning the Send buffer while building chunk lists.
 *
 * Elements of the Read list take up more room than the
 * Write list or Reply chunk. 16 read segments means the
 * chunk lists cannot consume more than
 *
 * ((16 + 2) * read segment size) + 1 XDR words,
 *
 * or about 400 bytes. The fixed part of the header is
 * another 24 bytes. Thus when the inline threshold is
 * 1024 bytes, at least 600 bytes are available for RPC
 * message bodies.
 */
enum {
	RPCRDMA_MAX_HDR_SEGS = 16,
};
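
/* Worked example (illustrative, using the figures from the comment
 * above): with the chunk lists capped at roughly 400 bytes and the
 * fixed header occupying another 24 bytes, a 1024-byte inline
 * threshold leaves 1024 - 400 - 24 = 600 bytes for the RPC message
 * body itself.
 */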

/*
 * struct rpcrdma_rep -- this structure encapsulates state required
 * to receive and complete an RPC Reply, asynchronously. It needs
 * several pieces of state:
 *
 *   o receive buffer and ib_sge (donated to provider)
 *   o status of receive (success or not, length, inv rkey)
 *   o bookkeeping state to get run by reply handler (XDR stream)
 *
 * These structures are allocated during transport initialization.
 * N of these are associated with a transport instance, managed by
 * struct rpcrdma_buffer. N is the max number of outstanding RPCs.
 */

struct rpcrdma_rep {
	struct ib_cqe		rr_cqe;
	__be32			rr_xid;
	__be32			rr_vers;
	__be32			rr_proc;
	int			rr_wc_flags;
	u32			rr_inv_rkey;
	bool			rr_temp;
	struct rpcrdma_regbuf	*rr_rdmabuf;
	struct rpcrdma_xprt	*rr_rxprt;
	struct rpc_rqst		*rr_rqst;
	struct xdr_buf		rr_hdrbuf;
	struct xdr_stream	rr_stream;
	struct llist_node	rr_node;
	struct ib_recv_wr	rr_recv_wr;
	struct list_head	rr_all;
};

/* To reduce the rate at which a transport invokes ib_post_recv
 * (and thus the hardware doorbell rate), xprtrdma posts Receive
 * WRs in batches.
 *
 * Setting this to zero disables Receive post batching.
 */
enum {
	RPCRDMA_MAX_RECV_BATCH = 7,
};

/* struct rpcrdma_sendctx - DMA mapped SGEs to unmap after Send completes
 */
struct rpcrdma_req;
struct rpcrdma_sendctx {
	struct ib_cqe		sc_cqe;
	struct rpcrdma_req	*sc_req;
	unsigned int		sc_unmap_count;
	struct ib_sge		sc_sges[];
};

/*
 * struct rpcrdma_mr - external memory region metadata
 *
 * An external memory region is any buffer or page that is registered
 * on the fly (i.e., not pre-registered).
 */
struct rpcrdma_frwr {
	struct ib_mr			*fr_mr;
	struct ib_cqe			fr_cqe;
	struct completion		fr_linv_done;
	union {
		struct ib_reg_wr	fr_regwr;
		struct ib_send_wr	fr_invwr;
	};
};

struct rpcrdma_req;
struct rpcrdma_mr {
	struct list_head	mr_list;
	struct rpcrdma_req	*mr_req;
	struct scatterlist	*mr_sg;
	int			mr_nents;
	enum dma_data_direction	mr_dir;
	struct rpcrdma_frwr	frwr;
	struct rpcrdma_xprt	*mr_xprt;
	u32			mr_handle;
	u32			mr_length;
	u64			mr_offset;
	struct list_head	mr_all;
};

/*
 * struct rpcrdma_req -- structure central to the request/reply sequence.
 *
 * N of these are associated with a transport instance, and stored in
 * struct rpcrdma_buffer. N is the max number of outstanding requests.
 *
 * It includes pre-registered buffer memory for send AND recv.
 * The recv buffer, however, is not owned by this structure, and
 * is "donated" to the hardware when a recv is posted. When a
 * reply is handled, the recv buffer used is given back to the
 * struct rpcrdma_req associated with the request.
 *
 * In addition to the basic memory, this structure includes an array
 * of iovs for send operations. The reason is that the iovs passed to
 * ib_post_{send,recv} must not be modified until the work request
 * completes.
 */

/* Maximum number of page-sized "segments" per chunk list to be
 * registered or invalidated. Must handle a Reply chunk:
 */
enum {
	RPCRDMA_MAX_IOV_SEGS	= 3,
	RPCRDMA_MAX_DATA_SEGS	= ((1 * 1024 * 1024) / PAGE_SIZE) + 1,
	RPCRDMA_MAX_SEGS	= RPCRDMA_MAX_DATA_SEGS +
				  RPCRDMA_MAX_IOV_SEGS,
};
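
/* Worked example (illustrative): on a configuration with 4 KiB pages,
 * RPCRDMA_MAX_DATA_SEGS = (1 MiB / 4 KiB) + 1 = 257, so
 * RPCRDMA_MAX_SEGS = 257 + 3 = 260 chunk descriptors per request.
 * PAGE_SIZE is architecture-dependent, so these values vary.
 */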

struct rpcrdma_mr_seg {		/* chunk descriptors */
	u32		mr_len;		/* length of chunk or segment */
	struct page	*mr_page;	/* owning page, if any */
	char		*mr_offset;	/* kva if no page, else offset */
};

/* The Send SGE array is provisioned to send a maximum size
 * inline request:
 * - RPC-over-RDMA header
 * - xdr_buf head iovec
 * - RPCRDMA_MAX_INLINE bytes, in pages
 * - xdr_buf tail iovec
 *
 * The actual number of array elements consumed by each RPC
 * depends on the device's max_sge limit.
 */
enum {
	RPCRDMA_MIN_SEND_SGES = 3,
	RPCRDMA_MAX_PAGE_SGES = RPCRDMA_MAX_INLINE >> PAGE_SHIFT,
	RPCRDMA_MAX_SEND_SGES = 1 + 1 + RPCRDMA_MAX_PAGE_SGES + 1,
};
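
/* Worked example (illustrative): assuming a hypothetical
 * RPCRDMA_MAX_INLINE of 4096 bytes on a 4 KiB-page system,
 * RPCRDMA_MAX_PAGE_SGES = 4096 >> 12 = 1, and
 * RPCRDMA_MAX_SEND_SGES = 1 (header) + 1 (head iovec) + 1 (pages)
 * + 1 (tail iovec) = 4. The real RPCRDMA_MAX_INLINE value comes from
 * <linux/sunrpc/xprtrdma.h>.
 */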

struct rpcrdma_buffer;
struct rpcrdma_req {
	struct list_head	rl_list;
	struct rpc_rqst		rl_slot;
	struct rpcrdma_rep	*rl_reply;
	struct xdr_stream	rl_stream;
	struct xdr_buf		rl_hdrbuf;
	struct ib_send_wr	rl_wr;
	struct rpcrdma_sendctx	*rl_sendctx;
	struct rpcrdma_regbuf	*rl_rdmabuf;	/* xprt header */
	struct rpcrdma_regbuf	*rl_sendbuf;	/* rq_snd_buf */
	struct rpcrdma_regbuf	*rl_recvbuf;	/* rq_rcv_buf */

	struct list_head	rl_all;
	struct kref		rl_kref;

	struct list_head	rl_free_mrs;
	struct list_head	rl_registered;
	struct rpcrdma_mr_seg	rl_segments[RPCRDMA_MAX_SEGS];
};

static inline struct rpcrdma_req *
rpcr_to_rdmar(const struct rpc_rqst *rqst)
{
	return container_of(rqst, struct rpcrdma_req, rl_slot);
}

static inline void
rpcrdma_mr_push(struct rpcrdma_mr *mr, struct list_head *list)
{
	list_add(&mr->mr_list, list);
}

static inline struct rpcrdma_mr *
rpcrdma_mr_pop(struct list_head *list)
{
	struct rpcrdma_mr *mr;

	mr = list_first_entry_or_null(list, struct rpcrdma_mr, mr_list);
	if (mr)
		list_del_init(&mr->mr_list);
	return mr;
}
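
/* Minimal usage sketch (illustrative, not part of the original header):
 * a caller that owns one of the MR lists above, for example the
 * rl_registered list in struct rpcrdma_req, can drain it with
 * rpcrdma_mr_pop() and release each MR with rpcrdma_mr_put(), which is
 * declared later in this header:
 *
 *	struct rpcrdma_mr *mr;
 *
 *	while ((mr = rpcrdma_mr_pop(&req->rl_registered)))
 *		rpcrdma_mr_put(mr);
 *
 * rpcrdma_mr_push() performs the inverse, adding an MR to a list.
 */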

/*
 * struct rpcrdma_buffer -- holds list/queue of pre-registered memory for
 * inline requests/replies, and client/server credits.
 *
 * One of these is associated with a transport instance
 */
struct rpcrdma_buffer {
	spinlock_t		rb_lock;
	struct list_head	rb_send_bufs;
	struct list_head	rb_mrs;

	unsigned long		rb_sc_head;
	unsigned long		rb_sc_tail;
	unsigned long		rb_sc_last;
	struct rpcrdma_sendctx	**rb_sc_ctxs;

	struct list_head	rb_allreqs;
	struct list_head	rb_all_mrs;
	struct list_head	rb_all_reps;

	struct llist_head	rb_free_reps;

	__be32			rb_max_requests;
	u32			rb_credits;	/* most recent credit grant */

	u32			rb_bc_srv_max_requests;
	u32			rb_bc_max_requests;

	struct work_struct	rb_refresh_worker;
};

/*
 * Statistics for RPCRDMA
 */
struct rpcrdma_stats {
	/* accessed when sending a call */
	unsigned long		read_chunk_count;
	unsigned long		write_chunk_count;
	unsigned long		reply_chunk_count;
	unsigned long long	total_rdma_request;

	/* rarely accessed error counters */
	unsigned long long	pullup_copy_count;
	unsigned long		hardway_register_count;
	unsigned long		failed_marshal_count;
	unsigned long		bad_reply_count;
	unsigned long		mrs_recycled;
	unsigned long		mrs_orphaned;
	unsigned long		mrs_allocated;
	unsigned long		empty_sendctx_q;

	/* accessed when receiving a reply */
	unsigned long long	total_rdma_reply;
	unsigned long long	fixup_copy_count;
	unsigned long		reply_waits_for_send;
	unsigned long		local_inv_needed;
	unsigned long		nomsg_call_count;
	unsigned long		bcall_count;
};

/*
 * RPCRDMA transport -- encapsulates the structures above for
 * integration with RPC.
 *
 * The contained structures are embedded, not pointers,
 * for convenience. This structure need not be visible externally.
 *
 * It is allocated and initialized during mount, and released
 * during unmount.
 */
struct rpcrdma_xprt {
	struct rpc_xprt		rx_xprt;
	struct rpcrdma_ep	*rx_ep;
	struct rpcrdma_buffer	rx_buf;
	struct delayed_work	rx_connect_worker;
	struct rpc_timeout	rx_timeout;
	struct rpcrdma_stats	rx_stats;
};

#define rpcx_to_rdmax(x) container_of(x, struct rpcrdma_xprt, rx_xprt)

static inline const char *
rpcrdma_addrstr(const struct rpcrdma_xprt *r_xprt)
{
	return r_xprt->rx_xprt.address_strings[RPC_DISPLAY_ADDR];
}

static inline const char *
rpcrdma_portstr(const struct rpcrdma_xprt *r_xprt)
{
	return r_xprt->rx_xprt.address_strings[RPC_DISPLAY_PORT];
}

/* Setting this to 0 ensures interoperability with early servers.
 * Setting this to 1 enhances certain unaligned read/write performance.
 * Default is 0, see sysctl entry and rpc_rdma.c rpcrdma_convert_iovs() */
extern int xprt_rdma_pad_optimize;

/* This setting controls the hunt for a supported memory
 * registration strategy.
 */
extern unsigned int xprt_rdma_memreg_strategy;

/*
 * Endpoint calls - xprtrdma/verbs.c
 */
void rpcrdma_flush_disconnect(struct rpcrdma_xprt *r_xprt, struct ib_wc *wc);
int rpcrdma_xprt_connect(struct rpcrdma_xprt *r_xprt);
void rpcrdma_xprt_disconnect(struct rpcrdma_xprt *r_xprt);

int rpcrdma_post_sends(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req);
void rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, int needed, bool temp);

/*
 * Buffer calls - xprtrdma/verbs.c
 */
struct rpcrdma_req *rpcrdma_req_create(struct rpcrdma_xprt *r_xprt, size_t size,
				       gfp_t flags);
int rpcrdma_req_setup(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req);
void rpcrdma_req_destroy(struct rpcrdma_req *req);
int rpcrdma_buffer_create(struct rpcrdma_xprt *);
void rpcrdma_buffer_destroy(struct rpcrdma_buffer *);
struct rpcrdma_sendctx *rpcrdma_sendctx_get_locked(struct rpcrdma_xprt *r_xprt);

struct rpcrdma_mr *rpcrdma_mr_get(struct rpcrdma_xprt *r_xprt);
void rpcrdma_mr_put(struct rpcrdma_mr *mr);
void rpcrdma_mrs_refresh(struct rpcrdma_xprt *r_xprt);

struct rpcrdma_req *rpcrdma_buffer_get(struct rpcrdma_buffer *);
void rpcrdma_buffer_put(struct rpcrdma_buffer *buffers,
			struct rpcrdma_req *req);
void rpcrdma_reply_put(struct rpcrdma_buffer *buffers, struct rpcrdma_req *req);
void rpcrdma_recv_buffer_put(struct rpcrdma_rep *);

bool rpcrdma_regbuf_realloc(struct rpcrdma_regbuf *rb, size_t size,
			    gfp_t flags);
bool __rpcrdma_regbuf_dma_map(struct rpcrdma_xprt *r_xprt,
			      struct rpcrdma_regbuf *rb);

/**
 * rpcrdma_regbuf_is_mapped - check if buffer is DMA mapped
 * @rb: regbuf to check
 *
 * Returns true if the buffer is now mapped to rb->rg_device.
 */
static inline bool rpcrdma_regbuf_is_mapped(struct rpcrdma_regbuf *rb)
{
	return rb->rg_device != NULL;
}

/**
 * rpcrdma_regbuf_dma_map - DMA-map a regbuf
 * @r_xprt: controlling transport instance
 * @rb: regbuf to be mapped
 *
 * Returns true if the buffer is currently DMA mapped.
 */
static inline bool rpcrdma_regbuf_dma_map(struct rpcrdma_xprt *r_xprt,
					  struct rpcrdma_regbuf *rb)
{
	if (likely(rpcrdma_regbuf_is_mapped(rb)))
		return true;
	return __rpcrdma_regbuf_dma_map(r_xprt, rb);
}

/*
 * Wrappers for chunk registration, shared by read/write chunk code.
 */

static inline enum dma_data_direction
rpcrdma_data_dir(bool writing)
{
	return writing ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
}

/* Memory registration calls xprtrdma/frwr_ops.c
 */
void frwr_reset(struct rpcrdma_req *req);
int frwr_query_device(struct rpcrdma_ep *ep, const struct ib_device *device);
int frwr_mr_init(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr *mr);
void frwr_release_mr(struct rpcrdma_mr *mr);
struct rpcrdma_mr_seg *frwr_map(struct rpcrdma_xprt *r_xprt,
				struct rpcrdma_mr_seg *seg,
				int nsegs, bool writing, __be32 xid,
				struct rpcrdma_mr *mr);
int frwr_send(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req);
void frwr_reminv(struct rpcrdma_rep *rep, struct list_head *mrs);
void frwr_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req);
void frwr_unmap_async(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req);

/*
 * RPC/RDMA protocol calls - xprtrdma/rpc_rdma.c
 */

enum rpcrdma_chunktype {
	rpcrdma_noch = 0,
	rpcrdma_noch_pullup,
	rpcrdma_noch_mapped,
	rpcrdma_readch,
	rpcrdma_areadch,
	rpcrdma_writech,
	rpcrdma_replych
};

int rpcrdma_prepare_send_sges(struct rpcrdma_xprt *r_xprt,
			      struct rpcrdma_req *req, u32 hdrlen,
			      struct xdr_buf *xdr,
			      enum rpcrdma_chunktype rtype);
void rpcrdma_sendctx_unmap(struct rpcrdma_sendctx *sc);
int rpcrdma_marshal_req(struct rpcrdma_xprt *r_xprt, struct rpc_rqst *rqst);
void rpcrdma_set_max_header_sizes(struct rpcrdma_ep *ep);
void rpcrdma_reset_cwnd(struct rpcrdma_xprt *r_xprt);
void rpcrdma_complete_rqst(struct rpcrdma_rep *rep);
void rpcrdma_reply_handler(struct rpcrdma_rep *rep);

static inline void rpcrdma_set_xdrlen(struct xdr_buf *xdr, size_t len)
{
	xdr->head[0].iov_len = len;
	xdr->len = len;
}

/* RPC/RDMA module init - xprtrdma/transport.c
 */
extern unsigned int xprt_rdma_max_inline_read;
extern unsigned int xprt_rdma_max_inline_write;
void xprt_rdma_format_addresses(struct rpc_xprt *xprt, struct sockaddr *sap);
void xprt_rdma_free_addresses(struct rpc_xprt *xprt);
void xprt_rdma_close(struct rpc_xprt *xprt);
void xprt_rdma_print_stats(struct rpc_xprt *xprt, struct seq_file *seq);
int xprt_rdma_init(void);
void xprt_rdma_cleanup(void);

/* Backchannel calls - xprtrdma/backchannel.c
 */
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
int xprt_rdma_bc_setup(struct rpc_xprt *, unsigned int);
size_t xprt_rdma_bc_maxpayload(struct rpc_xprt *);
unsigned int xprt_rdma_bc_max_slots(struct rpc_xprt *);
int rpcrdma_bc_post_recv(struct rpcrdma_xprt *, unsigned int);
void rpcrdma_bc_receive_call(struct rpcrdma_xprt *, struct rpcrdma_rep *);
int xprt_rdma_bc_send_reply(struct rpc_rqst *rqst);
void xprt_rdma_bc_free_rqst(struct rpc_rqst *);
void xprt_rdma_bc_destroy(struct rpc_xprt *, unsigned int);
#endif	/* CONFIG_SUNRPC_BACKCHANNEL */

extern struct xprt_class xprt_rdma_bc;

#endif				/* _LINUX_SUNRPC_XPRT_RDMA_H */