Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (c) 2014-2017 Oracle.  All rights reserved.
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * transport.c
 *
 * This file contains the top-level implementation of an RPC RDMA
 * transport.
 *
 * Naming convention: functions beginning with xprt_ are part of the
 * transport switch. All others are RPC RDMA internal.
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/smp.h>

#include <linux/sunrpc/addr.h>
#include <linux/sunrpc/svc_rdma.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

/*
 * tunables
 */

static unsigned int xprt_rdma_slot_table_entries = RPCRDMA_DEF_SLOT_TABLE;
unsigned int xprt_rdma_max_inline_read = RPCRDMA_DEF_INLINE;
unsigned int xprt_rdma_max_inline_write = RPCRDMA_DEF_INLINE;
unsigned int xprt_rdma_memreg_strategy		= RPCRDMA_FRWR;
int xprt_rdma_pad_optimize;

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)

static unsigned int min_slot_table_size = RPCRDMA_MIN_SLOT_TABLE;
static unsigned int max_slot_table_size = RPCRDMA_MAX_SLOT_TABLE;
static unsigned int min_inline_size = RPCRDMA_MIN_INLINE;
static unsigned int max_inline_size = RPCRDMA_MAX_INLINE;
static unsigned int max_padding = PAGE_SIZE;
static unsigned int min_memreg = RPCRDMA_BOUNCEBUFFERS;
static unsigned int max_memreg = RPCRDMA_LAST - 1;
static unsigned int dummy;

static struct ctl_table_header *sunrpc_table_header;

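/* Descriptive note: when SUNRPC debugging is built in, the tunables
 * above are exposed through the "sunrpc" sysctl directory registered
 * below, typically visible as /proc/sys/sunrpc/rdma_*.
 */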
static struct ctl_table xr_tunables_table[] = {
	{
		.procname	= "rdma_slot_table_entries",
		.data		= &xprt_rdma_slot_table_entries,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &min_slot_table_size,
		.extra2		= &max_slot_table_size
	},
	{
		.procname	= "rdma_max_inline_read",
		.data		= &xprt_rdma_max_inline_read,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &min_inline_size,
		.extra2		= &max_inline_size,
	},
	{
		.procname	= "rdma_max_inline_write",
		.data		= &xprt_rdma_max_inline_write,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &min_inline_size,
		.extra2		= &max_inline_size,
	},
	{
		.procname	= "rdma_inline_write_padding",
		.data		= &dummy,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,
		.extra2		= &max_padding,
	},
	{
		.procname	= "rdma_memreg_strategy",
		.data		= &xprt_rdma_memreg_strategy,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &min_memreg,
		.extra2		= &max_memreg,
	},
	{
		.procname	= "rdma_pad_optimize",
		.data		= &xprt_rdma_pad_optimize,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{ },
};

static struct ctl_table sunrpc_table[] = {
	{
		.procname	= "sunrpc",
		.mode		= 0555,
		.child		= xr_tunables_table
	},
	{ },
};

#endif

static const struct rpc_xprt_ops xprt_rdma_procs;

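/* Per-address-family helpers: record the server address in hex form
 * and set the rpcbind netid (RPCBIND_NETID_RDMA or RPCBIND_NETID_RDMA6)
 * used for display.
 */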
static void
xprt_rdma_format_addresses4(struct rpc_xprt *xprt, struct sockaddr *sap)
{
	struct sockaddr_in *sin = (struct sockaddr_in *)sap;
	char buf[20];

	snprintf(buf, sizeof(buf), "%08x", ntohl(sin->sin_addr.s_addr));
	xprt->address_strings[RPC_DISPLAY_HEX_ADDR] = kstrdup(buf, GFP_KERNEL);

	xprt->address_strings[RPC_DISPLAY_NETID] = RPCBIND_NETID_RDMA;
}

static void
xprt_rdma_format_addresses6(struct rpc_xprt *xprt, struct sockaddr *sap)
{
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sap;
	char buf[40];

	snprintf(buf, sizeof(buf), "%pi6", &sin6->sin6_addr);
	xprt->address_strings[RPC_DISPLAY_HEX_ADDR] = kstrdup(buf, GFP_KERNEL);

	xprt->address_strings[RPC_DISPLAY_NETID] = RPCBIND_NETID_RDMA6;
}

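/* Descriptive note: xprt_rdma_format_addresses() dispatches to the
 * family-specific helper above, then fills in the printable address,
 * port, and hex port strings. The PROTO and NETID entries point at
 * static strings.
 */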
void
xprt_rdma_format_addresses(struct rpc_xprt *xprt, struct sockaddr *sap)
{
	char buf[128];

	switch (sap->sa_family) {
	case AF_INET:
		xprt_rdma_format_addresses4(xprt, sap);
		break;
	case AF_INET6:
		xprt_rdma_format_addresses6(xprt, sap);
		break;
	default:
		pr_err("rpcrdma: Unrecognized address family\n");
		return;
	}

	(void)rpc_ntop(sap, buf, sizeof(buf));
	xprt->address_strings[RPC_DISPLAY_ADDR] = kstrdup(buf, GFP_KERNEL);

	snprintf(buf, sizeof(buf), "%u", rpc_get_port(sap));
	xprt->address_strings[RPC_DISPLAY_PORT] = kstrdup(buf, GFP_KERNEL);

	snprintf(buf, sizeof(buf), "%4hx", rpc_get_port(sap));
	xprt->address_strings[RPC_DISPLAY_HEX_PORT] = kstrdup(buf, GFP_KERNEL);

	xprt->address_strings[RPC_DISPLAY_PROTO] = "rdma";
}

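/* Descriptive note: the free routine below releases the kstrdup'd
 * display strings; RPC_DISPLAY_PROTO and RPC_DISPLAY_NETID are skipped
 * because they reference static strings.
 */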
void
xprt_rdma_free_addresses(struct rpc_xprt *xprt)
{
	unsigned int i;

	for (i = 0; i < RPC_DISPLAY_MAX; i++)
		switch (i) {
		case RPC_DISPLAY_PROTO:
		case RPC_DISPLAY_NETID:
			continue;
		default:
			kfree(xprt->address_strings[i]);
		}
}

/**
 * xprt_rdma_connect_worker - establish connection in the background
 * @work: worker thread context
 *
 * Requester holds the xprt's send lock to prevent activity on this
 * transport while a fresh connection is being established. RPC tasks
 * sleep on the xprt's pending queue waiting for connect to complete.
 */
static void
xprt_rdma_connect_worker(struct work_struct *work)
{
	struct rpcrdma_xprt *r_xprt = container_of(work, struct rpcrdma_xprt,
						   rx_connect_worker.work);
	struct rpc_xprt *xprt = &r_xprt->rx_xprt;
	int rc;

	rc = rpcrdma_xprt_connect(r_xprt);
	xprt_clear_connecting(xprt);
	if (!rc) {
		xprt->connect_cookie++;
		xprt->stat.connect_count++;
		xprt->stat.connect_time += (long)jiffies -
					   xprt->stat.connect_start;
		xprt_set_connected(xprt);
		rc = -EAGAIN;
	} else
		rpcrdma_xprt_disconnect(r_xprt);
	xprt_unlock_connect(xprt, r_xprt);
	xprt_wake_pending_tasks(xprt, rc);
}

/**
 * xprt_rdma_inject_disconnect - inject a connection fault
 * @xprt: transport context
 *
 * If @xprt is connected, disconnect it to simulate spurious
 * connection loss. Caller must hold @xprt's send lock to
 * ensure that data structures and hardware resources are
 * stable during the rdma_disconnect() call.
 */
static void
xprt_rdma_inject_disconnect(struct rpc_xprt *xprt)
{
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);

	trace_xprtrdma_op_inject_dsc(r_xprt);
	rdma_disconnect(r_xprt->rx_ep->re_id);
}

/**
 * xprt_rdma_destroy - Full tear down of transport
 * @xprt: doomed transport context
 *
 * Caller guarantees there will be no more calls to us with
 * this @xprt.
 */
static void
xprt_rdma_destroy(struct rpc_xprt *xprt)
{
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);

	cancel_delayed_work_sync(&r_xprt->rx_connect_worker);

	rpcrdma_xprt_disconnect(r_xprt);
	rpcrdma_buffer_destroy(&r_xprt->rx_buf);

	xprt_rdma_free_addresses(xprt);
	xprt_free(xprt);

	module_put(THIS_MODULE);
}

/* 60 second timeout, no retries */
static const struct rpc_timeout xprt_rdma_default_timeout = {
	.to_initval = 60 * HZ,
	.to_maxval = 60 * HZ,
};

/**
 * xprt_setup_rdma - Set up transport to use RDMA
 *
 * @args: rpc transport arguments
 */
static struct rpc_xprt *
xprt_setup_rdma(struct xprt_create *args)
{
	struct rpc_xprt *xprt;
	struct rpcrdma_xprt *new_xprt;
	struct sockaddr *sap;
	int rc;

	if (args->addrlen > sizeof(xprt->addr))
		return ERR_PTR(-EBADF);

	if (!try_module_get(THIS_MODULE))
		return ERR_PTR(-EIO);

	xprt = xprt_alloc(args->net, sizeof(struct rpcrdma_xprt), 0,
			  xprt_rdma_slot_table_entries);
	if (!xprt) {
		module_put(THIS_MODULE);
		return ERR_PTR(-ENOMEM);
	}

	xprt->timeout = &xprt_rdma_default_timeout;
	xprt->connect_timeout = xprt->timeout->to_initval;
	xprt->max_reconnect_timeout = xprt->timeout->to_maxval;
	xprt->bind_timeout = RPCRDMA_BIND_TO;
	xprt->reestablish_timeout = RPCRDMA_INIT_REEST_TO;
	xprt->idle_timeout = RPCRDMA_IDLE_DISC_TO;

	xprt->resvport = 0;		/* privileged port not needed */
	xprt->ops = &xprt_rdma_procs;

	/*
	 * Set up RDMA-specific connect data.
	 */
	sap = args->dstaddr;

	/* Ensure xprt->addr holds valid server TCP (not RDMA)
	 * address, for any side protocols which peek at it */
	xprt->prot = IPPROTO_TCP;
	xprt->addrlen = args->addrlen;
	memcpy(&xprt->addr, sap, xprt->addrlen);

	if (rpc_get_port(sap))
		xprt_set_bound(xprt);
	xprt_rdma_format_addresses(xprt, sap);

	new_xprt = rpcx_to_rdmax(xprt);
	rc = rpcrdma_buffer_create(new_xprt);
	if (rc) {
		xprt_rdma_free_addresses(xprt);
		xprt_free(xprt);
		module_put(THIS_MODULE);
		return ERR_PTR(rc);
	}

	INIT_DELAYED_WORK(&new_xprt->rx_connect_worker,
			  xprt_rdma_connect_worker);

	xprt->max_payload = RPCRDMA_MAX_DATA_SEGS << PAGE_SHIFT;

	return xprt;
}

/**
 * xprt_rdma_close - close a transport connection
 * @xprt: transport context
 *
 * Called during autoclose or device removal.
 *
 * Caller holds @xprt's send lock to prevent activity on this
 * transport while the connection is torn down.
 */
void xprt_rdma_close(struct rpc_xprt *xprt)
{
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);

	rpcrdma_xprt_disconnect(r_xprt);

	xprt->reestablish_timeout = 0;
	++xprt->connect_cookie;
	xprt_disconnect_done(xprt);
}

/**
 * xprt_rdma_set_port - update server port with rpcbind result
 * @xprt: controlling RPC transport
 * @port: new port value
 *
 * Transport connect status is unchanged.
 */
static void
xprt_rdma_set_port(struct rpc_xprt *xprt, u16 port)
{
	struct sockaddr *sap = (struct sockaddr *)&xprt->addr;
	char buf[8];

	rpc_set_port(sap, port);

	kfree(xprt->address_strings[RPC_DISPLAY_PORT]);
	snprintf(buf, sizeof(buf), "%u", port);
	xprt->address_strings[RPC_DISPLAY_PORT] = kstrdup(buf, GFP_KERNEL);

	kfree(xprt->address_strings[RPC_DISPLAY_HEX_PORT]);
	snprintf(buf, sizeof(buf), "%4hx", port);
	xprt->address_strings[RPC_DISPLAY_HEX_PORT] = kstrdup(buf, GFP_KERNEL);
}

/**
 * xprt_rdma_timer - invoked when an RPC times out
 * @xprt: controlling RPC transport
 * @task: RPC task that timed out
 *
 * Invoked when the transport is still connected, but an RPC
 * retransmit timeout occurs.
 *
 * Since RDMA connections don't have a keep-alive, forcibly
 * disconnect and retry to connect. This drives full
 * detection of the network path, and retransmissions of
 * all pending RPCs.
 */
static void
xprt_rdma_timer(struct rpc_xprt *xprt, struct rpc_task *task)
{
	xprt_force_disconnect(xprt);
}

/**
 * xprt_rdma_set_connect_timeout - set timeouts for establishing a connection
 * @xprt: controlling transport instance
 * @connect_timeout: reconnect timeout after client disconnects
 * @reconnect_timeout: reconnect timeout after server disconnects
 *
 */
static void xprt_rdma_set_connect_timeout(struct rpc_xprt *xprt,
					  unsigned long connect_timeout,
					  unsigned long reconnect_timeout)
{
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);

	trace_xprtrdma_op_set_cto(r_xprt, connect_timeout, reconnect_timeout);

	spin_lock(&xprt->transport_lock);

	if (connect_timeout < xprt->connect_timeout) {
		struct rpc_timeout to;
		unsigned long initval;

		to = *xprt->timeout;
		initval = connect_timeout;
		if (initval < RPCRDMA_INIT_REEST_TO << 1)
			initval = RPCRDMA_INIT_REEST_TO << 1;
		to.to_initval = initval;
		to.to_maxval = initval;
		r_xprt->rx_timeout = to;
		xprt->timeout = &r_xprt->rx_timeout;
		xprt->connect_timeout = connect_timeout;
	}

	if (reconnect_timeout < xprt->max_reconnect_timeout)
		xprt->max_reconnect_timeout = reconnect_timeout;

	spin_unlock(&xprt->transport_lock);
}

/**
 * xprt_rdma_connect - schedule an attempt to reconnect
 * @xprt: transport state
 * @task: RPC scheduler context (unused)
 *
 */
static void
xprt_rdma_connect(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	struct rpcrdma_ep *ep = r_xprt->rx_ep;
	unsigned long delay;

	WARN_ON_ONCE(!xprt_lock_connect(xprt, task, r_xprt));

	delay = 0;
	if (ep && ep->re_connect_status != 0) {
		delay = xprt_reconnect_delay(xprt);
		xprt_reconnect_backoff(xprt, RPCRDMA_INIT_REEST_TO);
	}
	trace_xprtrdma_op_connect(r_xprt, delay);
	queue_delayed_work(xprtiod_workqueue, &r_xprt->rx_connect_worker,
			   delay);
}

/**
 * xprt_rdma_alloc_slot - allocate an rpc_rqst
 * @xprt: controlling RPC transport
 * @task: RPC task requesting a fresh rpc_rqst
 *
 * tk_status values:
 *	%0 if task->tk_rqstp points to a fresh rpc_rqst
 *	%-EAGAIN if no rpc_rqst is available; queued on backlog
 */
static void
xprt_rdma_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	struct rpcrdma_req *req;

	req = rpcrdma_buffer_get(&r_xprt->rx_buf);
	if (!req)
		goto out_sleep;
	task->tk_rqstp = &req->rl_slot;
	task->tk_status = 0;
	return;

out_sleep:
	task->tk_status = -EAGAIN;
	xprt_add_backlog(xprt, task);
}

/**
 * xprt_rdma_free_slot - release an rpc_rqst
 * @xprt: controlling RPC transport
 * @rqst: rpc_rqst to release
 *
 */
static void
xprt_rdma_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *rqst)
{
	struct rpcrdma_xprt *r_xprt =
		container_of(xprt, struct rpcrdma_xprt, rx_xprt);

	rpcrdma_reply_put(&r_xprt->rx_buf, rpcr_to_rdmar(rqst));
	if (!xprt_wake_up_backlog(xprt, rqst)) {
		memset(rqst, 0, sizeof(*rqst));
		rpcrdma_buffer_put(&r_xprt->rx_buf, rpcr_to_rdmar(rqst));
	}
}

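/* Ensure a pre-allocated send or receive buffer is large enough for
 * the current RPC; if not, re-allocate it and account for the growth
 * as a "hardway" registration in the transport statistics.
 */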
static bool rpcrdma_check_regbuf(struct rpcrdma_xprt *r_xprt,
				 struct rpcrdma_regbuf *rb, size_t size,
				 gfp_t flags)
{
	if (unlikely(rdmab_length(rb) < size)) {
		if (!rpcrdma_regbuf_realloc(rb, size, flags))
			return false;
		r_xprt->rx_stats.hardway_register_count += size;
	}
	return true;
}

/**
 * xprt_rdma_allocate - allocate transport resources for an RPC
 * @task: RPC task
 *
 * Return values:
 *        0:	Success; rq_buffer points to RPC buffer to use
 *   ENOMEM:	Out of memory, call again later
 *      EIO:	A permanent error occurred, do not retry
 */
static int
xprt_rdma_allocate(struct rpc_task *task)
{
	struct rpc_rqst *rqst = task->tk_rqstp;
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(rqst->rq_xprt);
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
	gfp_t flags;

	flags = RPCRDMA_DEF_GFP;
	if (RPC_IS_SWAPPER(task))
		flags = __GFP_MEMALLOC | GFP_NOWAIT | __GFP_NOWARN;

	if (!rpcrdma_check_regbuf(r_xprt, req->rl_sendbuf, rqst->rq_callsize,
				  flags))
		goto out_fail;
	if (!rpcrdma_check_regbuf(r_xprt, req->rl_recvbuf, rqst->rq_rcvsize,
				  flags))
		goto out_fail;

	rqst->rq_buffer = rdmab_data(req->rl_sendbuf);
	rqst->rq_rbuffer = rdmab_data(req->rl_recvbuf);
	return 0;

out_fail:
	return -ENOMEM;
}

/**
 * xprt_rdma_free - release resources allocated by xprt_rdma_allocate
 * @task: RPC task
 *
 * Caller guarantees rqst->rq_buffer is non-NULL.
 */
static void
xprt_rdma_free(struct rpc_task *task)
{
	struct rpc_rqst *rqst = task->tk_rqstp;
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(rqst->rq_xprt);
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);

	if (!list_empty(&req->rl_registered))
		frwr_unmap_sync(r_xprt, req);

	/* XXX: If the RPC is completing because of a signal and
	 * not because a reply was received, we ought to ensure
	 * that the Send completion has fired, so that memory
	 * involved with the Send is not still visible to the NIC.
	 */
}

/**
 * xprt_rdma_send_request - marshal and send an RPC request
 * @rqst: RPC message in rq_snd_buf
 *
 * Caller holds the transport's write lock.
 *
 * Returns:
 *	%0 if the RPC message has been sent
 *	%-ENOTCONN if the caller should reconnect and call again
 *	%-EAGAIN if the caller should call again
 *	%-ENOBUFS if the caller should call again after a delay
 *	%-EMSGSIZE if encoding ran out of buffer space. The request
 *		was not sent. Do not try to send this message again.
 *	%-EIO if an I/O error occurred. The request was not sent.
 *		Do not try to send this message again.
 */
static int
xprt_rdma_send_request(struct rpc_rqst *rqst)
{
	struct rpc_xprt *xprt = rqst->rq_xprt;
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	int rc = 0;

#if defined(CONFIG_SUNRPC_BACKCHANNEL)
	if (unlikely(!rqst->rq_buffer))
		return xprt_rdma_bc_send_reply(rqst);
#endif	/* CONFIG_SUNRPC_BACKCHANNEL */

	if (!xprt_connected(xprt))
		return -ENOTCONN;

	if (!xprt_request_get_cong(xprt, rqst))
		return -EBADSLT;

	rc = rpcrdma_marshal_req(r_xprt, rqst);
	if (rc < 0)
		goto failed_marshal;

	/* Must suppress retransmit to maintain credits */
	if (rqst->rq_connect_cookie == xprt->connect_cookie)
		goto drop_connection;
	rqst->rq_xtime = ktime_get();

	if (rpcrdma_post_sends(r_xprt, req))
		goto drop_connection;

	rqst->rq_xmit_bytes_sent += rqst->rq_snd_buf.len;

	/* An RPC with no reply will throw off credit accounting,
	 * so drop the connection to reset the credit grant.
	 */
	if (!rpc_reply_expected(rqst->rq_task))
		goto drop_connection;
	return 0;

failed_marshal:
	if (rc != -ENOTCONN)
		return rc;
drop_connection:
	xprt_rdma_close(xprt);
	return -ENOTCONN;
}

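/* Descriptive note: xprt_rdma_print_stats() writes one "xprt: rdma"
 * line to @seq (for example, the per-mount statistics file): the
 * generic rpc_xprt counters first, followed by the RPC/RDMA-specific
 * counters.
 */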
void xprt_rdma_print_stats(struct rpc_xprt *xprt, struct seq_file *seq)
{
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	long idle_time = 0;

	if (xprt_connected(xprt))
		idle_time = (long)(jiffies - xprt->last_used) / HZ;

	seq_puts(seq, "\txprt:\trdma ");
	seq_printf(seq, "%u %lu %lu %lu %ld %lu %lu %lu %llu %llu ",
		   0,	/* need a local port? */
		   xprt->stat.bind_count,
		   xprt->stat.connect_count,
		   xprt->stat.connect_time / HZ,
		   idle_time,
		   xprt->stat.sends,
		   xprt->stat.recvs,
		   xprt->stat.bad_xids,
		   xprt->stat.req_u,
		   xprt->stat.bklog_u);
	seq_printf(seq, "%lu %lu %lu %llu %llu %llu %llu %lu %lu %lu %lu ",
		   r_xprt->rx_stats.read_chunk_count,
		   r_xprt->rx_stats.write_chunk_count,
		   r_xprt->rx_stats.reply_chunk_count,
		   r_xprt->rx_stats.total_rdma_request,
		   r_xprt->rx_stats.total_rdma_reply,
		   r_xprt->rx_stats.pullup_copy_count,
		   r_xprt->rx_stats.fixup_copy_count,
		   r_xprt->rx_stats.hardway_register_count,
		   r_xprt->rx_stats.failed_marshal_count,
		   r_xprt->rx_stats.bad_reply_count,
		   r_xprt->rx_stats.nomsg_call_count);
	seq_printf(seq, "%lu %lu %lu %lu %lu %lu\n",
		   r_xprt->rx_stats.mrs_recycled,
		   r_xprt->rx_stats.mrs_orphaned,
		   r_xprt->rx_stats.mrs_allocated,
		   r_xprt->rx_stats.local_inv_needed,
		   r_xprt->rx_stats.empty_sendctx_q,
		   r_xprt->rx_stats.reply_waits_for_send);
}

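/* Swap-over-NFS hooks; this transport currently implements them as
 * no-ops.
 */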
static int
xprt_rdma_enable_swap(struct rpc_xprt *xprt)
{
	return 0;
}

static void
xprt_rdma_disable_swap(struct rpc_xprt *xprt)
{
}

/*
 * Plumbing for rpc transport switch and kernel module
 */

static const struct rpc_xprt_ops xprt_rdma_procs = {
	.reserve_xprt		= xprt_reserve_xprt_cong,
	.release_xprt		= xprt_release_xprt_cong, /* sunrpc/xprt.c */
	.alloc_slot		= xprt_rdma_alloc_slot,
	.free_slot		= xprt_rdma_free_slot,
	.release_request	= xprt_release_rqst_cong,       /* ditto */
	.wait_for_reply_request	= xprt_wait_for_reply_request_def, /* ditto */
	.timer			= xprt_rdma_timer,
	.rpcbind		= rpcb_getport_async,	/* sunrpc/rpcb_clnt.c */
	.set_port		= xprt_rdma_set_port,
	.connect		= xprt_rdma_connect,
	.buf_alloc		= xprt_rdma_allocate,
	.buf_free		= xprt_rdma_free,
	.send_request		= xprt_rdma_send_request,
	.close			= xprt_rdma_close,
	.destroy		= xprt_rdma_destroy,
	.set_connect_timeout	= xprt_rdma_set_connect_timeout,
	.print_stats		= xprt_rdma_print_stats,
	.enable_swap		= xprt_rdma_enable_swap,
	.disable_swap		= xprt_rdma_disable_swap,
	.inject_disconnect	= xprt_rdma_inject_disconnect,
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
	.bc_setup		= xprt_rdma_bc_setup,
	.bc_maxpayload		= xprt_rdma_bc_maxpayload,
	.bc_num_slots		= xprt_rdma_bc_max_slots,
	.bc_free_rqst		= xprt_rdma_bc_free_rqst,
	.bc_destroy		= xprt_rdma_bc_destroy,
#endif
};

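/* Registration record for the RPC transport switch: this transport is
 * identified by XPRT_TRANSPORT_RDMA and the "rdma"/"rdma6" netids.
 */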
static struct xprt_class xprt_rdma = {
	.list			= LIST_HEAD_INIT(xprt_rdma.list),
	.name			= "rdma",
	.owner			= THIS_MODULE,
	.ident			= XPRT_TRANSPORT_RDMA,
	.setup			= xprt_setup_rdma,
	.netid			= { "rdma", "rdma6", "" },
};

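/* Module unload path: remove the sysctl table (if it was registered)
 * and unregister both the forward and backchannel transport classes.
 */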
void xprt_rdma_cleanup(void)
{
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
	if (sunrpc_table_header) {
		unregister_sysctl_table(sunrpc_table_header);
		sunrpc_table_header = NULL;
	}
#endif

	xprt_unregister_transport(&xprt_rdma);
	xprt_unregister_transport(&xprt_rdma_bc);
}

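/* Module load path: register the forward and backchannel transports,
 * then publish the sysctl tunables when SUNRPC debugging is enabled.
 */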
int xprt_rdma_init(void)
{
	int rc;

	rc = xprt_register_transport(&xprt_rdma);
	if (rc)
		return rc;

	rc = xprt_register_transport(&xprt_rdma_bc);
	if (rc) {
		xprt_unregister_transport(&xprt_rdma);
		return rc;
	}

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
	if (!sunrpc_table_header)
		sunrpc_table_header = register_sysctl_table(sunrpc_table);
#endif
	return 0;
}