Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    1) // SPDX-License-Identifier: GPL-2.0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    3)  * linux/net/sunrpc/xprtsock.c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    4)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    5)  * Client-side transport implementation for sockets.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    6)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    7)  * TCP callback races fixes (C) 1998 Red Hat
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    8)  * TCP send fixes (C) 1998 Red Hat
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    9)  * TCP NFS related read + write fixes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   10)  *  (C) 1999 Dave Airlie, University of Limerick, Ireland <airlied@linux.ie>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   11)  *
 * Rewrite of large parts of the code in order to stabilize TCP stuff.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   13)  * Fix behaviour when socket buffer is full.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   14)  *  (C) 1999 Trond Myklebust <trond.myklebust@fys.uio.no>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   15)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   16)  * IP socket transport implementation, (C) 2005 Chuck Lever <cel@netapp.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   17)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   18)  * IPv6 support contributed by Gilles Quillard, Bull Open Source, 2005.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   19)  *   <gilles.quillard@bull.net>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   20)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   21) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   22) #include <linux/types.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   23) #include <linux/string.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   24) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   25) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   26) #include <linux/capability.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   27) #include <linux/pagemap.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   28) #include <linux/errno.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   29) #include <linux/socket.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   30) #include <linux/in.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   31) #include <linux/net.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   32) #include <linux/mm.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   33) #include <linux/un.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   34) #include <linux/udp.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   35) #include <linux/tcp.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   36) #include <linux/sunrpc/clnt.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   37) #include <linux/sunrpc/addr.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   38) #include <linux/sunrpc/sched.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   39) #include <linux/sunrpc/svcsock.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   40) #include <linux/sunrpc/xprtsock.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   41) #include <linux/file.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   42) #ifdef CONFIG_SUNRPC_BACKCHANNEL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   43) #include <linux/sunrpc/bc_xprt.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   44) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   45) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   46) #include <net/sock.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   47) #include <net/checksum.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   48) #include <net/udp.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   49) #include <net/tcp.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   50) #include <linux/bvec.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   51) #include <linux/highmem.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   52) #include <linux/uio.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   53) #include <linux/sched/mm.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   54) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   55) #include <trace/events/sunrpc.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   56) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   57) #include "socklib.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   58) #include "sunrpc.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   59) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   60) static void xs_close(struct rpc_xprt *xprt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   61) static void xs_tcp_set_socket_timeouts(struct rpc_xprt *xprt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   62) 		struct socket *sock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   63) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   64) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   65)  * xprtsock tunables
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   66)  */
/* Number of RPC request slots per UDP transport. */
static unsigned int xprt_udp_slot_table_entries = RPC_DEF_SLOT_TABLE;
/* Number of RPC request slots per TCP transport, and its upper bound. */
static unsigned int xprt_tcp_slot_table_entries = RPC_MIN_SLOT_TABLE;
static unsigned int xprt_max_tcp_slot_table_entries = RPC_MAX_SLOT_TABLE;

/* Range of local ports to use when a reserved port is requested. */
static unsigned int xprt_min_resvport = RPC_DEF_MIN_RESVPORT;
static unsigned int xprt_max_resvport = RPC_DEF_MAX_RESVPORT;

/* Timeout (in jiffies) while waiting for a closing TCP connection to
 * finish; adjustable via the tcp_fin_timeout sysctl below. */
#define XS_TCP_LINGER_TO	(15U * HZ)
static unsigned int xs_tcp_fin_timeout __read_mostly = XS_TCP_LINGER_TO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   76) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   77) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   78)  * We can register our own files under /proc/sys/sunrpc by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   79)  * calling register_sysctl_table() again.  The files in that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   80)  * directory become the union of all files registered there.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   81)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   82)  * We simply need to make sure that we don't collide with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   83)  * someone else's file names!
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   84)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   85) 
/* Bounds handed to proc_dointvec_minmax via extra1/extra2 in the
 * sysctl table below; they clamp what userspace may write. */
static unsigned int min_slot_table_size = RPC_MIN_SLOT_TABLE;
static unsigned int max_slot_table_size = RPC_MAX_SLOT_TABLE;
static unsigned int max_tcp_slot_table_limit = RPC_MAX_SLOT_TABLE_LIMIT;
static unsigned int xprt_min_resvport_limit = RPC_MIN_RESVPORT;
static unsigned int xprt_max_resvport_limit = RPC_MAX_RESVPORT;

/* Handle returned by sysctl registration; presumably used to
 * unregister the table again at teardown -- not visible here. */
static struct ctl_table_header *sunrpc_table_header;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   93) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   94) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   95)  * FIXME: changing the UDP slot table size should also resize the UDP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   96)  *        socket buffers for existing UDP transports
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   97)  */
/* Tunables exported under /proc/sys/sunrpc/.  Integer entries are
 * range-checked by proc_dointvec_minmax using extra1/extra2. */
static struct ctl_table xs_tunables_table[] = {
	{
		/* Slot table size for UDP transports. */
		.procname	= "udp_slot_table_entries",
		.data		= &xprt_udp_slot_table_entries,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &min_slot_table_size,
		.extra2		= &max_slot_table_size
	},
	{
		/* Slot table size for TCP transports. */
		.procname	= "tcp_slot_table_entries",
		.data		= &xprt_tcp_slot_table_entries,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &min_slot_table_size,
		.extra2		= &max_slot_table_size
	},
	{
		/* Upper limit on the TCP slot table size; note the wider
		 * max_tcp_slot_table_limit bound. */
		.procname	= "tcp_max_slot_table_entries",
		.data		= &xprt_max_tcp_slot_table_entries,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &min_slot_table_size,
		.extra2		= &max_tcp_slot_table_limit
	},
	{
		/* Low end of the reserved-port range. */
		.procname	= "min_resvport",
		.data		= &xprt_min_resvport,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &xprt_min_resvport_limit,
		.extra2		= &xprt_max_resvport_limit
	},
	{
		/* High end of the reserved-port range. */
		.procname	= "max_resvport",
		.data		= &xprt_max_resvport,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &xprt_min_resvport_limit,
		.extra2		= &xprt_max_resvport_limit
	},
	{
		/* TCP close/FIN wait timeout, read and written in jiffies. */
		.procname	= "tcp_fin_timeout",
		.data		= &xs_tcp_fin_timeout,
		.maxlen		= sizeof(xs_tcp_fin_timeout),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{ },	/* sentinel */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  153) 
/* Parent directory entry: attaches xs_tunables_table under the
 * shared /proc/sys/sunrpc directory (mode 0555: read/search only). */
static struct ctl_table sunrpc_table[] = {
	{
		.procname	= "sunrpc",
		.mode		= 0555,
		.child		= xs_tunables_table
	},
	{ },	/* sentinel */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  162) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  163) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  164)  * Wait duration for a reply from the RPC portmapper.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  165)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  166) #define XS_BIND_TO		(60U * HZ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  167) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  168) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  169)  * Delay if a UDP socket connect error occurs.  This is most likely some
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  170)  * kind of resource problem on the local host.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  171)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  172) #define XS_UDP_REEST_TO		(2U * HZ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  173) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  174) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  175)  * The reestablish timeout allows clients to delay for a bit before attempting
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  176)  * to reconnect to a server that just dropped our connection.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  177)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  178)  * We implement an exponential backoff when trying to reestablish a TCP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  179)  * transport connection with the server.  Some servers like to drop a TCP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  180)  * connection when they are overworked, so we start with a short timeout and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  181)  * increase over time if the server is down or not responding.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  182)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  183) #define XS_TCP_INIT_REEST_TO	(3U * HZ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  184) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  185) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  186)  * TCP idle timeout; client drops the transport socket if it is idle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  187)  * for this long.  Note that we also timeout UDP sockets to prevent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  188)  * holding port numbers when there is no RPC traffic.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  189)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  190) #define XS_IDLE_DISC_TO		(5U * 60 * HZ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  191) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  192) #if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  193) # undef  RPC_DEBUG_DATA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  194) # define RPCDBG_FACILITY	RPCDBG_TRANS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  195) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  196) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  197) #ifdef RPC_DEBUG_DATA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  198) static void xs_pktdump(char *msg, u32 *packet, unsigned int count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  199) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  200) 	u8 *buf = (u8 *) packet;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  201) 	int j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  202) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  203) 	dprintk("RPC:       %s\n", msg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  204) 	for (j = 0; j < count && j < 128; j += 4) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  205) 		if (!(j & 31)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  206) 			if (j)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  207) 				dprintk("\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  208) 			dprintk("0x%04x ", j);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  209) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  210) 		dprintk("%02x%02x%02x%02x ",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  211) 			buf[j], buf[j+1], buf[j+2], buf[j+3]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  212) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  213) 	dprintk("\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  214) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  215) #else
/* Packet dumping compiled out when RPC_DEBUG_DATA is not defined. */
static inline void xs_pktdump(char *msg, u32 *packet, unsigned int count)
{
	/* NOP */
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  220) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  221) 
/* Back-pointer from a socket to its owning rpc_xprt; sk_user_data is
 * presumably set when the transport binds the socket -- not visible here. */
static inline struct rpc_xprt *xprt_from_sock(struct sock *sk)
{
	return (struct rpc_xprt *) sk->sk_user_data;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  226) 
/* Generic view of the transport's stored peer address. */
static inline struct sockaddr *xs_addr(struct rpc_xprt *xprt)
{
	return (struct sockaddr *) &xprt->addr;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  231) 
/* AF_LOCAL view of the transport's stored peer address. */
static inline struct sockaddr_un *xs_addr_un(struct rpc_xprt *xprt)
{
	return (struct sockaddr_un *) &xprt->addr;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  236) 
/* AF_INET view of the transport's stored peer address. */
static inline struct sockaddr_in *xs_addr_in(struct rpc_xprt *xprt)
{
	return (struct sockaddr_in *) &xprt->addr;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  241) 
/* AF_INET6 view of the transport's stored peer address. */
static inline struct sockaddr_in6 *xs_addr_in6(struct rpc_xprt *xprt)
{
	return (struct sockaddr_in6 *) &xprt->addr;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  246) 
/*
 * Build the printable peer-address strings for this transport:
 * RPC_DISPLAY_ADDR (presentation form) and RPC_DISPLAY_HEX_ADDR.
 * Only AF_LOCAL, AF_INET and AF_INET6 are valid; any other family is
 * a programming error (BUG()).
 *
 * NOTE(review): the kstrdup() results are not checked for NULL here;
 * presumably users of address_strings tolerate that -- TODO confirm.
 */
static void xs_format_common_peer_addresses(struct rpc_xprt *xprt)
{
	struct sockaddr *sap = xs_addr(xprt);
	struct sockaddr_in6 *sin6;
	struct sockaddr_in *sin;
	struct sockaddr_un *sun;
	char buf[128];

	switch (sap->sa_family) {
	case AF_LOCAL:
		/* AF_LOCAL: the displayed address is the socket path. */
		sun = xs_addr_un(xprt);
		strlcpy(buf, sun->sun_path, sizeof(buf));
		xprt->address_strings[RPC_DISPLAY_ADDR] =
						kstrdup(buf, GFP_KERNEL);
		break;
	case AF_INET:
		(void)rpc_ntop(sap, buf, sizeof(buf));
		xprt->address_strings[RPC_DISPLAY_ADDR] =
						kstrdup(buf, GFP_KERNEL);
		sin = xs_addr_in(xprt);
		/* Hex form: the IPv4 address as one 8-digit hex number. */
		snprintf(buf, sizeof(buf), "%08x", ntohl(sin->sin_addr.s_addr));
		break;
	case AF_INET6:
		(void)rpc_ntop(sap, buf, sizeof(buf));
		xprt->address_strings[RPC_DISPLAY_ADDR] =
						kstrdup(buf, GFP_KERNEL);
		sin6 = xs_addr_in6(xprt);
		/* Hex form: raw IPv6 address via the kernel %pi6 format. */
		snprintf(buf, sizeof(buf), "%pi6", &sin6->sin6_addr);
		break;
	default:
		BUG();
	}

	/* buf now holds the family-specific string computed above
	 * (socket path for AF_LOCAL, hex address otherwise). */
	xprt->address_strings[RPC_DISPLAY_HEX_ADDR] = kstrdup(buf, GFP_KERNEL);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  282) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  283) static void xs_format_common_peer_ports(struct rpc_xprt *xprt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  284) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  285) 	struct sockaddr *sap = xs_addr(xprt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  286) 	char buf[128];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  287) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  288) 	snprintf(buf, sizeof(buf), "%u", rpc_get_port(sap));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  289) 	xprt->address_strings[RPC_DISPLAY_PORT] = kstrdup(buf, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  290) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  291) 	snprintf(buf, sizeof(buf), "%4hx", rpc_get_port(sap));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  292) 	xprt->address_strings[RPC_DISPLAY_HEX_PORT] = kstrdup(buf, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  293) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  294) 
/*
 * Initialize every peer display string for a new transport.
 * @protocol and @netid are stored without being copied, so they must
 * outlive the xprt; xs_free_peer_addresses() deliberately skips these
 * two entries when freeing.
 */
static void xs_format_peer_addresses(struct rpc_xprt *xprt,
				     const char *protocol,
				     const char *netid)
{
	xprt->address_strings[RPC_DISPLAY_PROTO] = protocol;
	xprt->address_strings[RPC_DISPLAY_NETID] = netid;
	xs_format_common_peer_addresses(xprt);
	xs_format_common_peer_ports(xprt);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  304) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  305) static void xs_update_peer_port(struct rpc_xprt *xprt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  306) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  307) 	kfree(xprt->address_strings[RPC_DISPLAY_HEX_PORT]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  308) 	kfree(xprt->address_strings[RPC_DISPLAY_PORT]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  309) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  310) 	xs_format_common_peer_ports(xprt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  311) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  312) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  313) static void xs_free_peer_addresses(struct rpc_xprt *xprt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  314) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  315) 	unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  316) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  317) 	for (i = 0; i < RPC_DISPLAY_MAX; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  318) 		switch (i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  319) 		case RPC_DISPLAY_PROTO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  320) 		case RPC_DISPLAY_NETID:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  321) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  322) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  323) 			kfree(xprt->address_strings[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  324) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  325) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  326) 
/*
 * Ensure the page array backing @buf is populated before receiving
 * data into it.  Only acts on buffers marked XDRBUF_SPARSE_PAGES;
 * otherwise (or when @want is zero) @want is returned unchanged.
 *
 * Returns the number of bytes (out of @want) that the now-present
 * pages can hold: on allocation failure this is truncated to the
 * bytes covered by the pages that exist, possibly zero.
 */
static size_t
xs_alloc_sparse_pages(struct xdr_buf *buf, size_t want, gfp_t gfp)
{
	size_t i,n;

	if (!want || !(buf->flags & XDRBUF_SPARSE_PAGES))
		return want;
	/* Number of pages needed to cover page_base + want bytes. */
	n = (buf->page_base + want + PAGE_SIZE - 1) >> PAGE_SHIFT;
	for (i = 0; i < n; i++) {
		if (buf->pages[i])
			continue;
		/* Keep the bio_vec entry in sync with the new page. */
		buf->bvec[i].bv_page = buf->pages[i] = alloc_page(gfp);
		if (!buf->pages[i]) {
			/* Out of memory: report the usable byte count,
			 * discounting the unused page_base prefix. */
			i *= PAGE_SIZE;
			return i > buf->page_base ? i - buf->page_base : 0;
		}
	}
	return want;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  346) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  347) static ssize_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  348) xs_sock_recvmsg(struct socket *sock, struct msghdr *msg, int flags, size_t seek)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  349) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  350) 	ssize_t ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  351) 	if (seek != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  352) 		iov_iter_advance(&msg->msg_iter, seek);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  353) 	ret = sock_recvmsg(sock, msg, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  354) 	return ret > 0 ? ret + seek : ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  355) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  356) 
/* Receive up to @count bytes into a single @kvec, skipping the first
 * @seek bytes; returns bytes consumed including the skipped prefix. */
static ssize_t
xs_read_kvec(struct socket *sock, struct msghdr *msg, int flags,
		struct kvec *kvec, size_t count, size_t seek)
{
	iov_iter_kvec(&msg->msg_iter, READ, kvec, 1, count);
	return xs_sock_recvmsg(sock, msg, flags, seek);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  364) 
/* Receive up to @count bytes into @nr bio_vec pages, skipping the
 * first @seek bytes; returns bytes consumed including the prefix. */
static ssize_t
xs_read_bvec(struct socket *sock, struct msghdr *msg, int flags,
		struct bio_vec *bvec, unsigned long nr, size_t count,
		size_t seek)
{
	iov_iter_bvec(&msg->msg_iter, READ, bvec, nr, count);
	return xs_sock_recvmsg(sock, msg, flags, seek);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  373) 
/* Read and throw away up to @count bytes from the socket. */
static ssize_t
xs_read_discard(struct socket *sock, struct msghdr *msg, int flags,
		size_t count)
{
	iov_iter_discard(&msg->msg_iter, READ, count);
	return sock_recvmsg(sock, msg, flags);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  381) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  382) #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
/*
 * Flush the data cache for the pages a receive just wrote into:
 * @count bytes landed in @bvec starting at offset @seek.  The seek
 * offset is rounded down to a page boundary (seek & PAGE_MASK) so
 * that every partially-touched page is flushed too.
 */
static void
xs_flush_bvec(const struct bio_vec *bvec, size_t count, size_t seek)
{
	struct bvec_iter bi = {
		.bi_size = count,
	};
	struct bio_vec bv;

	bvec_iter_advance(bvec, &bi, seek & PAGE_MASK);
	for_each_bvec(bv, bvec, bi, bi)
		flush_dcache_page(bv.bv_page);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  395) #else
/* No-op on architectures whose flush_dcache_page() does nothing. */
static inline void
xs_flush_bvec(const struct bio_vec *bvec, size_t count, size_t seek)
{
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  400) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  401) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  402) static ssize_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  403) xs_read_xdr_buf(struct socket *sock, struct msghdr *msg, int flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  404) 		struct xdr_buf *buf, size_t count, size_t seek, size_t *read)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  405) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  406) 	size_t want, seek_init = seek, offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  407) 	ssize_t ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  408) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  409) 	want = min_t(size_t, count, buf->head[0].iov_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  410) 	if (seek < want) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  411) 		ret = xs_read_kvec(sock, msg, flags, &buf->head[0], want, seek);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  412) 		if (ret <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  413) 			goto sock_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  414) 		offset += ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  415) 		if (offset == count || msg->msg_flags & (MSG_EOR|MSG_TRUNC))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  416) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  417) 		if (ret != want)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  418) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  419) 		seek = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  420) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  421) 		seek -= want;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  422) 		offset += want;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  423) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  424) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  425) 	want = xs_alloc_sparse_pages(buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  426) 			min_t(size_t, count - offset, buf->page_len),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  427) 			GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  428) 	if (seek < want) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  429) 		ret = xs_read_bvec(sock, msg, flags, buf->bvec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  430) 				xdr_buf_pagecount(buf),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  431) 				want + buf->page_base,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  432) 				seek + buf->page_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  433) 		if (ret <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  434) 			goto sock_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  435) 		xs_flush_bvec(buf->bvec, ret, seek + buf->page_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  436) 		ret -= buf->page_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  437) 		offset += ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  438) 		if (offset == count || msg->msg_flags & (MSG_EOR|MSG_TRUNC))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  439) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  440) 		if (ret != want)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  441) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  442) 		seek = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  443) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  444) 		seek -= want;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  445) 		offset += want;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  446) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  447) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  448) 	want = min_t(size_t, count - offset, buf->tail[0].iov_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  449) 	if (seek < want) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  450) 		ret = xs_read_kvec(sock, msg, flags, &buf->tail[0], want, seek);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  451) 		if (ret <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  452) 			goto sock_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  453) 		offset += ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  454) 		if (offset == count || msg->msg_flags & (MSG_EOR|MSG_TRUNC))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  455) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  456) 		if (ret != want)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  457) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  458) 	} else if (offset < seek_init)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  459) 		offset = seek_init;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  460) 	ret = -EMSGSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  461) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  462) 	*read = offset - seek_init;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  463) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  464) sock_err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  465) 	offset += seek;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  466) 	goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  467) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  468) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  469) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  470) xs_read_header(struct sock_xprt *transport, struct xdr_buf *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  471) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  472) 	if (!transport->recv.copied) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  473) 		if (buf->head[0].iov_len >= transport->recv.offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  474) 			memcpy(buf->head[0].iov_base,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  475) 					&transport->recv.xid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  476) 					transport->recv.offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  477) 		transport->recv.copied = transport->recv.offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  478) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  479) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  480) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  481) static bool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  482) xs_read_stream_request_done(struct sock_xprt *transport)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  483) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  484) 	return transport->recv.fraghdr & cpu_to_be32(RPC_LAST_STREAM_FRAGMENT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  485) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  486) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  487) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  488) xs_read_stream_check_eor(struct sock_xprt *transport,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  489) 		struct msghdr *msg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  490) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  491) 	if (xs_read_stream_request_done(transport))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  492) 		msg->msg_flags |= MSG_EOR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  493) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  494) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  495) static ssize_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  496) xs_read_stream_request(struct sock_xprt *transport, struct msghdr *msg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  497) 		int flags, struct rpc_rqst *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  498) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  499) 	struct xdr_buf *buf = &req->rq_private_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  500) 	size_t want, read;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  501) 	ssize_t ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  502) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  503) 	xs_read_header(transport, buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  504) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  505) 	want = transport->recv.len - transport->recv.offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  506) 	if (want != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  507) 		ret = xs_read_xdr_buf(transport->sock, msg, flags, buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  508) 				transport->recv.copied + want,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  509) 				transport->recv.copied,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  510) 				&read);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  511) 		transport->recv.offset += read;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  512) 		transport->recv.copied += read;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  513) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  514) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  515) 	if (transport->recv.offset == transport->recv.len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  516) 		xs_read_stream_check_eor(transport, msg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  517) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  518) 	if (want == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  519) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  520) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  521) 	switch (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  522) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  523) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  524) 	case -EFAULT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  525) 	case -EMSGSIZE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  526) 		msg->msg_flags |= MSG_TRUNC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  527) 		return read;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  528) 	case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  529) 		return -ESHUTDOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  530) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  531) 	return ret < 0 ? ret : read;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  532) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  533) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  534) static size_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  535) xs_read_stream_headersize(bool isfrag)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  536) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  537) 	if (isfrag)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  538) 		return sizeof(__be32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  539) 	return 3 * sizeof(__be32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  540) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  541) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  542) static ssize_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  543) xs_read_stream_header(struct sock_xprt *transport, struct msghdr *msg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  544) 		int flags, size_t want, size_t seek)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  545) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  546) 	struct kvec kvec = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  547) 		.iov_base = &transport->recv.fraghdr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  548) 		.iov_len = want,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  549) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  550) 	return xs_read_kvec(transport->sock, msg, flags, &kvec, want, seek);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  551) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  552) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  553) #if defined(CONFIG_SUNRPC_BACKCHANNEL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  554) static ssize_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  555) xs_read_stream_call(struct sock_xprt *transport, struct msghdr *msg, int flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  556) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  557) 	struct rpc_xprt *xprt = &transport->xprt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  558) 	struct rpc_rqst *req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  559) 	ssize_t ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  560) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  561) 	/* Look up and lock the request corresponding to the given XID */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  562) 	req = xprt_lookup_bc_request(xprt, transport->recv.xid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  563) 	if (!req) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  564) 		printk(KERN_WARNING "Callback slot table overflowed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  565) 		return -ESHUTDOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  566) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  567) 	if (transport->recv.copied && !req->rq_private_buf.len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  568) 		return -ESHUTDOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  569) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  570) 	ret = xs_read_stream_request(transport, msg, flags, req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  571) 	if (msg->msg_flags & (MSG_EOR|MSG_TRUNC))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  572) 		xprt_complete_bc_request(req, transport->recv.copied);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  573) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  574) 		req->rq_private_buf.len = transport->recv.copied;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  575) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  576) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  577) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  578) #else /* CONFIG_SUNRPC_BACKCHANNEL */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  579) static ssize_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  580) xs_read_stream_call(struct sock_xprt *transport, struct msghdr *msg, int flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  581) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  582) 	return -ESHUTDOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  583) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  584) #endif /* CONFIG_SUNRPC_BACKCHANNEL */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  585) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  586) static ssize_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  587) xs_read_stream_reply(struct sock_xprt *transport, struct msghdr *msg, int flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  588) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  589) 	struct rpc_xprt *xprt = &transport->xprt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  590) 	struct rpc_rqst *req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  591) 	ssize_t ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  592) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  593) 	/* Look up and lock the request corresponding to the given XID */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  594) 	spin_lock(&xprt->queue_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  595) 	req = xprt_lookup_rqst(xprt, transport->recv.xid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  596) 	if (!req || (transport->recv.copied && !req->rq_private_buf.len)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  597) 		msg->msg_flags |= MSG_TRUNC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  598) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  599) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  600) 	xprt_pin_rqst(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  601) 	spin_unlock(&xprt->queue_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  602) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  603) 	ret = xs_read_stream_request(transport, msg, flags, req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  604) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  605) 	spin_lock(&xprt->queue_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  606) 	if (msg->msg_flags & (MSG_EOR|MSG_TRUNC))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  607) 		xprt_complete_rqst(req->rq_task, transport->recv.copied);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  608) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  609) 		req->rq_private_buf.len = transport->recv.copied;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  610) 	xprt_unpin_rqst(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  611) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  612) 	spin_unlock(&xprt->queue_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  613) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  614) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  615) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  616) static ssize_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  617) xs_read_stream(struct sock_xprt *transport, int flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  618) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  619) 	struct msghdr msg = { 0 };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  620) 	size_t want, read = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  621) 	ssize_t ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  622) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  623) 	if (transport->recv.len == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  624) 		want = xs_read_stream_headersize(transport->recv.copied != 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  625) 		ret = xs_read_stream_header(transport, &msg, flags, want,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  626) 				transport->recv.offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  627) 		if (ret <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  628) 			goto out_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  629) 		transport->recv.offset = ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  630) 		if (transport->recv.offset != want)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  631) 			return transport->recv.offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  632) 		transport->recv.len = be32_to_cpu(transport->recv.fraghdr) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  633) 			RPC_FRAGMENT_SIZE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  634) 		transport->recv.offset -= sizeof(transport->recv.fraghdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  635) 		read = ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  636) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  637) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  638) 	switch (be32_to_cpu(transport->recv.calldir)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  639) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  640) 		msg.msg_flags |= MSG_TRUNC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  641) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  642) 	case RPC_CALL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  643) 		ret = xs_read_stream_call(transport, &msg, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  644) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  645) 	case RPC_REPLY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  646) 		ret = xs_read_stream_reply(transport, &msg, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  647) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  648) 	if (msg.msg_flags & MSG_TRUNC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  649) 		transport->recv.calldir = cpu_to_be32(-1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  650) 		transport->recv.copied = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  651) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  652) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  653) 		goto out_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  654) 	read += ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  655) 	if (transport->recv.offset < transport->recv.len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  656) 		if (!(msg.msg_flags & MSG_TRUNC))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  657) 			return read;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  658) 		msg.msg_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  659) 		ret = xs_read_discard(transport->sock, &msg, flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  660) 				transport->recv.len - transport->recv.offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  661) 		if (ret <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  662) 			goto out_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  663) 		transport->recv.offset += ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  664) 		read += ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  665) 		if (transport->recv.offset != transport->recv.len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  666) 			return read;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  667) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  668) 	if (xs_read_stream_request_done(transport)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  669) 		trace_xs_stream_read_request(transport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  670) 		transport->recv.copied = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  671) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  672) 	transport->recv.offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  673) 	transport->recv.len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  674) 	return read;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  675) out_err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  676) 	return ret != 0 ? ret : -ESHUTDOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  677) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  678) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  679) static __poll_t xs_poll_socket(struct sock_xprt *transport)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  680) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  681) 	return transport->sock->ops->poll(transport->file, transport->sock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  682) 			NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  683) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  684) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  685) static bool xs_poll_socket_readable(struct sock_xprt *transport)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  686) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  687) 	__poll_t events = xs_poll_socket(transport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  688) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  689) 	return (events & (EPOLLIN | EPOLLRDNORM)) && !(events & EPOLLRDHUP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  690) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  691) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  692) static void xs_poll_check_readable(struct sock_xprt *transport)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  693) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  694) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  695) 	clear_bit(XPRT_SOCK_DATA_READY, &transport->sock_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  696) 	if (!xs_poll_socket_readable(transport))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  697) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  698) 	if (!test_and_set_bit(XPRT_SOCK_DATA_READY, &transport->sock_state))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  699) 		queue_work(xprtiod_workqueue, &transport->recv_worker);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  700) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  701) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  702) static void xs_stream_data_receive(struct sock_xprt *transport)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  703) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  704) 	size_t read = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  705) 	ssize_t ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  706) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  707) 	mutex_lock(&transport->recv_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  708) 	if (transport->sock == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  709) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  710) 	for (;;) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  711) 		ret = xs_read_stream(transport, MSG_DONTWAIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  712) 		if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  713) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  714) 		read += ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  715) 		cond_resched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  716) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  717) 	if (ret == -ESHUTDOWN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  718) 		kernel_sock_shutdown(transport->sock, SHUT_RDWR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  719) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  720) 		xs_poll_check_readable(transport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  721) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  722) 	mutex_unlock(&transport->recv_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  723) 	trace_xs_stream_read_data(&transport->xprt, ret, read);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  724) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  725) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  726) static void xs_stream_data_receive_workfn(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  727) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  728) 	struct sock_xprt *transport =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  729) 		container_of(work, struct sock_xprt, recv_worker);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  730) 	unsigned int pflags = memalloc_nofs_save();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  731) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  732) 	xs_stream_data_receive(transport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  733) 	memalloc_nofs_restore(pflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  734) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  735) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  736) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  737) xs_stream_reset_connect(struct sock_xprt *transport)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  738) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  739) 	transport->recv.offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  740) 	transport->recv.len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  741) 	transport->recv.copied = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  742) 	transport->xmit.offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  743) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  744) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  745) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  746) xs_stream_start_connect(struct sock_xprt *transport)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  747) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  748) 	transport->xprt.stat.connect_count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  749) 	transport->xprt.stat.connect_start = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  750) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  751) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  752) #define XS_SENDMSG_FLAGS	(MSG_DONTWAIT | MSG_NOSIGNAL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  753) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  754) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  755)  * xs_nospace - handle transmit was incomplete
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  756)  * @req: pointer to RPC request
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759) static int xs_nospace(struct rpc_rqst *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761) 	struct rpc_xprt *xprt = req->rq_xprt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762) 	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763) 	struct sock *sk = transport->inet;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764) 	int ret = -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) 	trace_rpc_socket_nospace(req, transport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) 	/* Protect against races with write_space */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) 	spin_lock(&xprt->transport_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) 	/* Don't race with disconnect */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) 	if (xprt_connected(xprt)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) 		/* wait for more buffer space */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) 		sk->sk_write_pending++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) 		xprt_wait_for_buffer_space(xprt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) 	} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) 		ret = -ENOTCONN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) 	spin_unlock(&xprt->transport_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) 	/* Race breaker in case memory is freed before above code is called */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) 	if (ret == -EAGAIN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) 		struct socket_wq *wq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) 		rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) 		wq = rcu_dereference(sk->sk_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 		set_bit(SOCKWQ_ASYNC_NOSPACE, &wq->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) 		rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 		sk->sk_write_space(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) xs_stream_prepare_request(struct rpc_rqst *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 	xdr_free_bvec(&req->rq_rcv_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 	req->rq_task->tk_status = xdr_alloc_bvec(&req->rq_rcv_buf, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803)  * Determine if the previous message in the stream was aborted before it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804)  * could complete transmission.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) static bool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) xs_send_request_was_aborted(struct sock_xprt *transport, struct rpc_rqst *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 	return transport->xmit.offset != 0 && req->rq_bytes_sent == 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813)  * Return the stream record marker field for a record of length < 2^31-1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) static rpc_fraghdr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) xs_stream_record_marker(struct xdr_buf *xdr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 	if (!xdr->len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 	return cpu_to_be32(RPC_LAST_STREAM_FRAGMENT | (u32)xdr->len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824)  * xs_local_send_request - write an RPC request to an AF_LOCAL socket
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825)  * @req: pointer to RPC request
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827)  * Return values:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828)  *        0:	The request has been sent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829)  *   EAGAIN:	The socket was blocked, please call again later to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830)  *		complete the request
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831)  * ENOTCONN:	Caller needs to invoke connect logic then call again
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832)  *    other:	Some other error occured, the request was not sent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) static int xs_local_send_request(struct rpc_rqst *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 	struct rpc_xprt *xprt = req->rq_xprt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 	struct sock_xprt *transport =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 				container_of(xprt, struct sock_xprt, xprt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 	struct xdr_buf *xdr = &req->rq_snd_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 	rpc_fraghdr rm = xs_stream_record_marker(xdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 	unsigned int msglen = rm ? req->rq_slen + sizeof(rm) : req->rq_slen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 	struct msghdr msg = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 		.msg_flags	= XS_SENDMSG_FLAGS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 	unsigned int sent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 	int status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 	/* Close the stream if the previous transmission was incomplete */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 	if (xs_send_request_was_aborted(transport, req)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 		xs_close(xprt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 		return -ENOTCONN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 	xs_pktdump("packet data:",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 			req->rq_svec->iov_base, req->rq_svec->iov_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 	req->rq_xtime = ktime_get();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 	status = xprt_sock_sendmsg(transport->sock, &msg, xdr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 				   transport->xmit.offset, rm, &sent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 	dprintk("RPC:       %s(%u) = %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 			__func__, xdr->len - transport->xmit.offset, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 	if (status == -EAGAIN && sock_writeable(transport->inet))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 		status = -ENOBUFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 	if (likely(sent > 0) || status == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 		transport->xmit.offset += sent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 		req->rq_bytes_sent = transport->xmit.offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 		if (likely(req->rq_bytes_sent >= msglen)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 			req->rq_xmit_bytes_sent += transport->xmit.offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 			transport->xmit.offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 		status = -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 	switch (status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 	case -ENOBUFS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 	case -EAGAIN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 		status = xs_nospace(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 		dprintk("RPC:       sendmsg returned unrecognized error %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 			-status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 		fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 	case -EPIPE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 		xs_close(xprt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 		status = -ENOTCONN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 	return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896)  * xs_udp_send_request - write an RPC request to a UDP socket
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897)  * @req: pointer to RPC request
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899)  * Return values:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900)  *        0:	The request has been sent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901)  *   EAGAIN:	The socket was blocked, please call again later to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902)  *		complete the request
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903)  * ENOTCONN:	Caller needs to invoke connect logic then call again
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904)  *    other:	Some other error occurred, the request was not sent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) static int xs_udp_send_request(struct rpc_rqst *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 	struct rpc_xprt *xprt = req->rq_xprt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 	struct xdr_buf *xdr = &req->rq_snd_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 	struct msghdr msg = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 		.msg_name	= xs_addr(xprt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 		.msg_namelen	= xprt->addrlen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 		.msg_flags	= XS_SENDMSG_FLAGS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 	unsigned int sent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 	int status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 	xs_pktdump("packet data:",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 				req->rq_svec->iov_base,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 				req->rq_svec->iov_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 	if (!xprt_bound(xprt))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 		return -ENOTCONN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 	if (!xprt_request_get_cong(xprt, req))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 		return -EBADSLT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 	req->rq_xtime = ktime_get();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 	status = xprt_sock_sendmsg(transport->sock, &msg, xdr, 0, 0, &sent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 	dprintk("RPC:       xs_udp_send_request(%u) = %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 			xdr->len, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 	/* firewall is blocking us, don't return -EAGAIN or we end up looping */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 	if (status == -EPERM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 		goto process_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 	if (status == -EAGAIN && sock_writeable(transport->inet))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 		status = -ENOBUFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 	if (sent > 0 || status == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 		req->rq_xmit_bytes_sent += sent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 		if (sent >= req->rq_slen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 		/* Still some bytes left; set up for a retry later. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 		status = -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) process_status:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 	switch (status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 	case -ENOTSOCK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 		status = -ENOTCONN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 		/* Should we call xs_close() here? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 	case -EAGAIN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 		status = xs_nospace(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 	case -ENETUNREACH:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 	case -ENOBUFS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 	case -EPIPE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 	case -ECONNREFUSED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 	case -EPERM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 		/* When the server has died, an ICMP port unreachable message
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 		 * prompts ECONNREFUSED. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 		dprintk("RPC:       sendmsg returned unrecognized error %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 			-status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 	return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976)  * xs_tcp_send_request - write an RPC request to a TCP socket
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977)  * @req: pointer to RPC request
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979)  * Return values:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980)  *        0:	The request has been sent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981)  *   EAGAIN:	The socket was blocked, please call again later to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982)  *		complete the request
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983)  * ENOTCONN:	Caller needs to invoke connect logic then call again
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984)  *    other:	Some other error occurred, the request was not sent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986)  * XXX: In the case of soft timeouts, should we eventually give up
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987)  *	if sendmsg is not able to make progress?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) static int xs_tcp_send_request(struct rpc_rqst *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 	struct rpc_xprt *xprt = req->rq_xprt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 	struct xdr_buf *xdr = &req->rq_snd_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 	rpc_fraghdr rm = xs_stream_record_marker(xdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 	unsigned int msglen = rm ? req->rq_slen + sizeof(rm) : req->rq_slen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 	struct msghdr msg = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 		.msg_flags	= XS_SENDMSG_FLAGS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 	bool vm_wait = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 	unsigned int sent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 	int status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 	/* Close the stream if the previous transmission was incomplete */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 	if (xs_send_request_was_aborted(transport, req)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 		if (transport->sock != NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 			kernel_sock_shutdown(transport->sock, SHUT_RDWR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 		return -ENOTCONN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 	xs_pktdump("packet data:",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 				req->rq_svec->iov_base,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 				req->rq_svec->iov_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 	if (test_bit(XPRT_SOCK_UPD_TIMEOUT, &transport->sock_state))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 		xs_tcp_set_socket_timeouts(xprt, transport->sock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 	/* Continue transmitting the packet/record. We must be careful
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 	 * to cope with writespace callbacks arriving _after_ we have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 	 * called sendmsg(). */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 	req->rq_xtime = ktime_get();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 	while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 		status = xprt_sock_sendmsg(transport->sock, &msg, xdr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 					   transport->xmit.offset, rm, &sent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 		dprintk("RPC:       xs_tcp_send_request(%u) = %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 				xdr->len - transport->xmit.offset, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 		/* If we've sent the entire packet, immediately
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 		 * reset the count of bytes sent. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 		transport->xmit.offset += sent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 		req->rq_bytes_sent = transport->xmit.offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 		if (likely(req->rq_bytes_sent >= msglen)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 			req->rq_xmit_bytes_sent += transport->xmit.offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 			transport->xmit.offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 		WARN_ON_ONCE(sent == 0 && status == 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 		if (status == -EAGAIN ) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 			 * Return EAGAIN if we're sure we're hitting the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 			 * socket send buffer limits.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 			if (test_bit(SOCK_NOSPACE, &transport->sock->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 			 * Did we hit a memory allocation failure?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 			if (sent == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 				status = -ENOBUFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 				if (vm_wait)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 					break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 				/* Retry, knowing now that we're below the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 				 * socket send buffer limit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 				 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 				vm_wait = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 		if (status < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 		vm_wait = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 	switch (status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 	case -ENOTSOCK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 		status = -ENOTCONN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 		/* Should we call xs_close() here? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 	case -EAGAIN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 		status = xs_nospace(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 	case -ECONNRESET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 	case -ECONNREFUSED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 	case -ENOTCONN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 	case -EADDRINUSE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 	case -ENOBUFS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 	case -EPIPE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 		dprintk("RPC:       sendmsg returned unrecognized error %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 			-status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 	return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) static void xs_save_old_callbacks(struct sock_xprt *transport, struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 	transport->old_data_ready = sk->sk_data_ready;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 	transport->old_state_change = sk->sk_state_change;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 	transport->old_write_space = sk->sk_write_space;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 	transport->old_error_report = sk->sk_error_report;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) static void xs_restore_old_callbacks(struct sock_xprt *transport, struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 	sk->sk_data_ready = transport->old_data_ready;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 	sk->sk_state_change = transport->old_state_change;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 	sk->sk_write_space = transport->old_write_space;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 	sk->sk_error_report = transport->old_error_report;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) static void xs_sock_reset_state_flags(struct rpc_xprt *xprt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) 	clear_bit(XPRT_SOCK_DATA_READY, &transport->sock_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 	clear_bit(XPRT_SOCK_WAKE_ERROR, &transport->sock_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 	clear_bit(XPRT_SOCK_WAKE_WRITE, &transport->sock_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 	clear_bit(XPRT_SOCK_WAKE_DISCONNECT, &transport->sock_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) static void xs_run_error_worker(struct sock_xprt *transport, unsigned int nr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 	set_bit(nr, &transport->sock_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 	queue_work(xprtiod_workqueue, &transport->error_worker);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) static void xs_sock_reset_connection_flags(struct rpc_xprt *xprt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 	smp_mb__before_atomic();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) 	clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 	clear_bit(XPRT_CLOSING, &xprt->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) 	xs_sock_reset_state_flags(xprt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 	smp_mb__after_atomic();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131)  * xs_error_report - callback to handle TCP socket state errors
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132)  * @sk: socket
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134)  * Note: we don't call sock_error() since there may be a rpc_task
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135)  * using the socket, and so we don't want to clear sk->sk_err.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) static void xs_error_report(struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) 	struct sock_xprt *transport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) 	struct rpc_xprt *xprt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) 	read_lock_bh(&sk->sk_callback_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) 	if (!(xprt = xprt_from_sock(sk)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) 	transport = container_of(xprt, struct sock_xprt, xprt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) 	transport->xprt_err = -sk->sk_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) 	if (transport->xprt_err == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) 	dprintk("RPC:       xs_error_report client %p, error=%d...\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 			xprt, -transport->xprt_err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) 	trace_rpc_socket_error(xprt, sk->sk_socket, transport->xprt_err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) 	/* barrier ensures xprt_err is set before XPRT_SOCK_WAKE_ERROR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 	smp_mb__before_atomic();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) 	xs_run_error_worker(transport, XPRT_SOCK_WAKE_ERROR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157)  out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 	read_unlock_bh(&sk->sk_callback_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) static void xs_reset_transport(struct sock_xprt *transport)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) 	struct socket *sock = transport->sock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) 	struct sock *sk = transport->inet;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) 	struct rpc_xprt *xprt = &transport->xprt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) 	struct file *filp = transport->file;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 	if (sk == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 	if (atomic_read(&transport->xprt.swapper))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 		sk_clear_memalloc(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) 	kernel_sock_shutdown(sock, SHUT_RDWR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) 	mutex_lock(&transport->recv_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) 	write_lock_bh(&sk->sk_callback_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) 	transport->inet = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) 	transport->sock = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 	transport->file = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) 	sk->sk_user_data = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) 	xs_restore_old_callbacks(transport, sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) 	xprt_clear_connected(xprt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) 	write_unlock_bh(&sk->sk_callback_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) 	xs_sock_reset_connection_flags(xprt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) 	/* Reset stream record info */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) 	xs_stream_reset_connect(transport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) 	mutex_unlock(&transport->recv_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) 	trace_rpc_socket_close(xprt, sock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) 	fput(filp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) 	xprt_disconnect_done(xprt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199)  * xs_close - close a socket
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200)  * @xprt: transport
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202)  * This is used when all requests are complete; ie, no DRC state remains
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203)  * on the server we want to save.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205)  * The caller _must_ be holding XPRT_LOCKED in order to avoid issues with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206)  * xs_reset_transport() zeroing the socket from underneath a writer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) static void xs_close(struct rpc_xprt *xprt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) 	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) 	dprintk("RPC:       xs_close xprt %p\n", xprt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) 	xs_reset_transport(transport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) 	xprt->reestablish_timeout = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) static void xs_inject_disconnect(struct rpc_xprt *xprt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 	dprintk("RPC:       injecting transport disconnect on xprt=%p\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) 		xprt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) 	xprt_disconnect_done(xprt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) static void xs_xprt_free(struct rpc_xprt *xprt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) 	xs_free_peer_addresses(xprt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) 	xprt_free(xprt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232)  * xs_destroy - prepare to shutdown a transport
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233)  * @xprt: doomed transport
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) static void xs_destroy(struct rpc_xprt *xprt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) 	struct sock_xprt *transport = container_of(xprt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) 			struct sock_xprt, xprt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) 	dprintk("RPC:       xs_destroy xprt %p\n", xprt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) 	cancel_delayed_work_sync(&transport->connect_worker);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) 	xs_close(xprt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) 	cancel_work_sync(&transport->recv_worker);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) 	cancel_work_sync(&transport->error_worker);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) 	xs_xprt_free(xprt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) 	module_put(THIS_MODULE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251)  * xs_udp_data_read_skb - receive callback for UDP sockets
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252)  * @xprt: transport
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253)  * @sk: socket
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254)  * @skb: skbuff
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) static void xs_udp_data_read_skb(struct rpc_xprt *xprt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) 		struct sock *sk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) 		struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) 	struct rpc_task *task;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) 	struct rpc_rqst *rovr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) 	int repsize, copied;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) 	u32 _xid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) 	__be32 *xp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) 	repsize = skb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) 	if (repsize < 4) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) 		dprintk("RPC:       impossible RPC reply size %d!\n", repsize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) 	/* Copy the XID from the skb... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) 	xp = skb_header_pointer(skb, 0, sizeof(_xid), &_xid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) 	if (xp == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) 	/* Look up and lock the request corresponding to the given XID */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) 	spin_lock(&xprt->queue_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) 	rovr = xprt_lookup_rqst(xprt, *xp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) 	if (!rovr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) 		goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) 	xprt_pin_rqst(rovr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) 	xprt_update_rtt(rovr->rq_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) 	spin_unlock(&xprt->queue_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) 	task = rovr->rq_task;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) 	if ((copied = rovr->rq_private_buf.buflen) > repsize)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) 		copied = repsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) 	/* Suck it into the iovec, verify checksum if not done by hw. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) 	if (csum_partial_copy_to_xdr(&rovr->rq_private_buf, skb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) 		spin_lock(&xprt->queue_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) 		__UDPX_INC_STATS(sk, UDP_MIB_INERRORS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) 		goto out_unpin;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) 	spin_lock(&xprt->transport_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) 	xprt_adjust_cwnd(xprt, task, copied);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) 	spin_unlock(&xprt->transport_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) 	spin_lock(&xprt->queue_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) 	xprt_complete_rqst(task, copied);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) 	__UDPX_INC_STATS(sk, UDP_MIB_INDATAGRAMS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) out_unpin:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) 	xprt_unpin_rqst(rovr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307)  out_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) 	spin_unlock(&xprt->queue_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) static void xs_udp_data_receive(struct sock_xprt *transport)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) 	struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) 	struct sock *sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) 	mutex_lock(&transport->recv_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) 	sk = transport->inet;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) 	if (sk == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) 	for (;;) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) 		skb = skb_recv_udp(sk, 0, 1, &err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) 		if (skb == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) 		xs_udp_data_read_skb(&transport->xprt, sk, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) 		consume_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) 		cond_resched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) 	xs_poll_check_readable(transport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) 	mutex_unlock(&transport->recv_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) static void xs_udp_data_receive_workfn(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) 	struct sock_xprt *transport =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) 		container_of(work, struct sock_xprt, recv_worker);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) 	unsigned int pflags = memalloc_nofs_save();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) 	xs_udp_data_receive(transport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) 	memalloc_nofs_restore(pflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345)  * xs_data_ready - "data ready" callback for UDP sockets
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346)  * @sk: socket with data to read
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) static void xs_data_ready(struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) 	struct rpc_xprt *xprt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) 	read_lock_bh(&sk->sk_callback_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) 	dprintk("RPC:       xs_data_ready...\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) 	xprt = xprt_from_sock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) 	if (xprt != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) 		struct sock_xprt *transport = container_of(xprt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) 				struct sock_xprt, xprt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) 		transport->old_data_ready(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) 		/* Any data means we had a useful conversation, so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) 		 * then we don't need to delay the next reconnect
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) 		if (xprt->reestablish_timeout)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) 			xprt->reestablish_timeout = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) 		if (!test_and_set_bit(XPRT_SOCK_DATA_READY, &transport->sock_state))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) 			queue_work(xprtiod_workqueue, &transport->recv_worker);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) 	read_unlock_bh(&sk->sk_callback_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) 
/*
 * Helper function to force a TCP close if the server is sending
 * junk and/or it has put us in CLOSE_WAIT
 */
static void xs_tcp_force_close(struct rpc_xprt *xprt)
{
	/* Delegates entirely to the generic transport disconnect path. */
	xprt_force_disconnect(xprt);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) 
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
/* Maximum payload of a backchannel RPC over this TCP transport:
 * fixed at one page.
 */
static size_t xs_tcp_bc_maxpayload(struct rpc_xprt *xprt)
{
	return PAGE_SIZE;
}
#endif /* CONFIG_SUNRPC_BACKCHANNEL */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) 
/**
 * xs_tcp_state_change - callback to handle TCP socket state changes
 * @sk: socket whose state has changed
 *
 * Runs in softirq context under sk->sk_callback_lock and translates
 * TCP state transitions into the rpc_xprt state bits that the rest
 * of the RPC client acts upon.
 */
static void xs_tcp_state_change(struct sock *sk)
{
	struct rpc_xprt *xprt;
	struct sock_xprt *transport;

	read_lock_bh(&sk->sk_callback_lock);
	if (!(xprt = xprt_from_sock(sk)))
		goto out;
	dprintk("RPC:       xs_tcp_state_change client %p...\n", xprt);
	dprintk("RPC:       state %x conn %d dead %d zapped %d sk_shutdown %d\n",
			sk->sk_state, xprt_connected(xprt),
			sock_flag(sk, SOCK_DEAD),
			sock_flag(sk, SOCK_ZAPPED),
			sk->sk_shutdown);

	transport = container_of(xprt, struct sock_xprt, xprt);
	trace_rpc_socket_state_change(xprt, sk->sk_socket);
	switch (sk->sk_state) {
	case TCP_ESTABLISHED:
		/* Only the first callback for this connection does the
		 * bookkeeping; xprt_test_and_set_connected() filters out
		 * duplicates.
		 */
		if (!xprt_test_and_set_connected(xprt)) {
			xprt->connect_cookie++;
			clear_bit(XPRT_SOCK_CONNECTING, &transport->sock_state);
			xprt_clear_connecting(xprt);

			xprt->stat.connect_count++;
			xprt->stat.connect_time += (long)jiffies -
						   xprt->stat.connect_start;
			/* wake tasks that were waiting for the connection */
			xs_run_error_worker(transport, XPRT_SOCK_WAKE_PENDING);
		}
		break;
	case TCP_FIN_WAIT1:
		/* The client initiated a shutdown of the socket */
		xprt->connect_cookie++;
		xprt->reestablish_timeout = 0;
		set_bit(XPRT_CLOSING, &xprt->state);
		/* The barrier pair orders XPRT_CLOSING before the
		 * CONNECTED/CLOSE_WAIT clears for lock-free readers of
		 * xprt->state.
		 */
		smp_mb__before_atomic();
		clear_bit(XPRT_CONNECTED, &xprt->state);
		clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
		smp_mb__after_atomic();
		break;
	case TCP_CLOSE_WAIT:
		/* The server initiated a shutdown of the socket */
		xprt->connect_cookie++;
		clear_bit(XPRT_CONNECTED, &xprt->state);
		xs_run_error_worker(transport, XPRT_SOCK_WAKE_DISCONNECT);
		fallthrough;
	case TCP_CLOSING:
		/*
		 * If the server closed down the connection, make sure that
		 * we back off before reconnecting
		 */
		if (xprt->reestablish_timeout < XS_TCP_INIT_REEST_TO)
			xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
		break;
	case TCP_LAST_ACK:
		set_bit(XPRT_CLOSING, &xprt->state);
		smp_mb__before_atomic();
		clear_bit(XPRT_CONNECTED, &xprt->state);
		smp_mb__after_atomic();
		break;
	case TCP_CLOSE:
		/* Fully closed: if we were the side still connecting,
		 * clear the connecting state too.
		 */
		if (test_and_clear_bit(XPRT_SOCK_CONNECTING,
					&transport->sock_state))
			xprt_clear_connecting(xprt);
		clear_bit(XPRT_CLOSING, &xprt->state);
		/* Trigger the socket release */
		xs_run_error_worker(transport, XPRT_SOCK_WAKE_DISCONNECT);
	}
 out:
	read_unlock_bh(&sk->sk_callback_lock);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) 
/* Common write-space handler shared by the UDP and TCP callbacks.
 * Wakes a sender that blocked for lack of socket buffer space.
 * SOCKWQ_ASYNC_NOSPACE acts as an edge filter: the wakeup only fires
 * if some sender previously set that flag while waiting.
 */
static void xs_write_space(struct sock *sk)
{
	struct socket_wq *wq;
	struct sock_xprt *transport;
	struct rpc_xprt *xprt;

	if (!sk->sk_socket)
		return;
	clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);

	if (unlikely(!(xprt = xprt_from_sock(sk))))
		return;
	transport = container_of(xprt, struct sock_xprt, xprt);
	/* sk->sk_wq is RCU-protected; hold the read lock across its use. */
	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (!wq || test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &wq->flags) == 0)
		goto out;

	xs_run_error_worker(transport, XPRT_SOCK_WAKE_WRITE);
	/* NOTE(review): assumes a matching sk_write_pending++ was done by
	 * the sender that set SOCKWQ_ASYNC_NOSPACE — verify against the
	 * send path.
	 */
	sk->sk_write_pending--;
out:
	rcu_read_unlock();
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489)  * xs_udp_write_space - callback invoked when socket buffer space
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490)  *                             becomes available
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491)  * @sk: socket whose state has changed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493)  * Called when more output buffer space is available for this socket.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494)  * We try not to wake our writers until they can make "significant"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495)  * progress, otherwise we'll waste resources thrashing kernel_sendmsg
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496)  * with a bunch of small requests.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) static void xs_udp_write_space(struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) 	read_lock_bh(&sk->sk_callback_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) 	/* from net/core/sock.c:sock_def_write_space */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) 	if (sock_writeable(sk))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) 		xs_write_space(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) 	read_unlock_bh(&sk->sk_callback_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510)  * xs_tcp_write_space - callback invoked when socket buffer space
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511)  *                             becomes available
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512)  * @sk: socket whose state has changed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514)  * Called when more output buffer space is available for this socket.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515)  * We try not to wake our writers until they can make "significant"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516)  * progress, otherwise we'll waste resources thrashing kernel_sendmsg
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517)  * with a bunch of small requests.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) static void xs_tcp_write_space(struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) 	read_lock_bh(&sk->sk_callback_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) 	/* from net/core/stream.c:sk_stream_write_space */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) 	if (sk_stream_is_writeable(sk))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) 		xs_write_space(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) 	read_unlock_bh(&sk->sk_callback_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) static void xs_udp_do_set_buffer_size(struct rpc_xprt *xprt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) 	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) 	struct sock *sk = transport->inet;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) 	if (transport->rcvsize) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) 		sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) 		sk->sk_rcvbuf = transport->rcvsize * xprt->max_reqs * 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) 	if (transport->sndsize) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) 		sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) 		sk->sk_sndbuf = transport->sndsize * xprt->max_reqs * 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) 		sk->sk_write_space(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547)  * xs_udp_set_buffer_size - set send and receive limits
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548)  * @xprt: generic transport
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549)  * @sndsize: requested size of send buffer, in bytes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550)  * @rcvsize: requested size of receive buffer, in bytes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552)  * Set socket send and receive buffer size limits.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) static void xs_udp_set_buffer_size(struct rpc_xprt *xprt, size_t sndsize, size_t rcvsize)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) 	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) 	transport->sndsize = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) 	if (sndsize)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) 		transport->sndsize = sndsize + 1024;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) 	transport->rcvsize = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) 	if (rcvsize)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) 		transport->rcvsize = rcvsize + 1024;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) 	xs_udp_do_set_buffer_size(xprt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) 
/**
 * xs_udp_timer - called when a retransmit timeout occurs on a UDP transport
 * @xprt: controlling transport
 * @task: task that timed out
 *
 * Adjust the congestion window after a retransmit timeout has occurred.
 */
static void xs_udp_timer(struct rpc_xprt *xprt, struct rpc_task *task)
{
	/* transport_lock serializes congestion-window updates; -ETIMEDOUT
	 * tells xprt_adjust_cwnd to shrink the window. */
	spin_lock(&xprt->transport_lock);
	xprt_adjust_cwnd(xprt, task, -ETIMEDOUT);
	spin_unlock(&xprt->transport_lock);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) static int xs_get_random_port(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) 	unsigned short min = xprt_min_resvport, max = xprt_max_resvport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) 	unsigned short range;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) 	unsigned short rand;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) 	if (max < min)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) 		return -EADDRINUSE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) 	range = max - min + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) 	rand = (unsigned short) prandom_u32() % range;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) 	return rand + min;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) static unsigned short xs_sock_getport(struct socket *sock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) 	struct sockaddr_storage buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) 	unsigned short port = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) 	if (kernel_getsockname(sock, (struct sockaddr *)&buf) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) 	switch (buf.ss_family) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) 	case AF_INET6:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) 		port = ntohs(((struct sockaddr_in6 *)&buf)->sin6_port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) 	case AF_INET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) 		port = ntohs(((struct sockaddr_in *)&buf)->sin_port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) 	return port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) 
/**
 * xs_set_port - reset the port number in the remote endpoint address
 * @xprt: generic transport
 * @port: new port number
 *
 * Updates the stored destination address and regenerates the printable
 * peer-port strings to match.
 */
static void xs_set_port(struct rpc_xprt *xprt, unsigned short port)
{
	dprintk("RPC:       setting port for xprt %p to %u\n", xprt, port);

	rpc_set_port(xs_addr(xprt), port);
	xs_update_peer_port(xprt);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) static void xs_set_srcport(struct sock_xprt *transport, struct socket *sock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) 	if (transport->srcport == 0 && transport->xprt.reuseport)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) 		transport->srcport = xs_sock_getport(sock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) static int xs_get_srcport(struct sock_xprt *transport)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) 	int port = transport->srcport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) 	if (port == 0 && transport->xprt.resvport)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) 		port = xs_get_random_port();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) 	return port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) unsigned short get_srcport(struct rpc_xprt *xprt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) 	struct sock_xprt *sock = container_of(xprt, struct sock_xprt, xprt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) 	return xs_sock_getport(sock->sock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) EXPORT_SYMBOL(get_srcport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) static unsigned short xs_next_srcport(struct sock_xprt *transport, unsigned short port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) 	if (transport->srcport != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) 		transport->srcport = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) 	if (!transport->xprt.resvport)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) 	if (port <= xprt_min_resvport || port > xprt_max_resvport)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) 		return xprt_max_resvport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) 	return --port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) }
/* Bind @sock to a local source port, walking downward through the
 * reserved range on -EADDRINUSE and wrapping at most once (nloop
 * limits the scan to two passes over the range).  Returns 0 on
 * success or a negative errno.
 */
static int xs_bind(struct sock_xprt *transport, struct socket *sock)
{
	struct sockaddr_storage myaddr;
	int err, nloop = 0;
	int port = xs_get_srcport(transport);
	unsigned short last;

	/*
	 * If we are asking for any ephemeral port (i.e. port == 0 &&
	 * transport->xprt.resvport == 0), don't bind.  Let the local
	 * port selection happen implicitly when the socket is used
	 * (for example at connect time).
	 *
	 * This ensures that we can continue to establish TCP
	 * connections even when all local ephemeral ports are already
	 * a part of some TCP connection.  This makes no difference
	 * for UDP sockets, but also doesn't harm them.
	 *
	 * If we're asking for any reserved port (i.e. port == 0 &&
	 * transport->xprt.resvport == 1) xs_get_srcport above will
	 * ensure that port is non-zero and we will bind as needed.
	 */
	if (port <= 0)
		return port;

	memcpy(&myaddr, &transport->srcaddr, transport->xprt.addrlen);
	do {
		rpc_set_port((struct sockaddr *)&myaddr, port);
		err = kernel_bind(sock, (struct sockaddr *)&myaddr,
				transport->xprt.addrlen);
		if (err == 0) {
			/* cache the port only for reuseport transports,
			 * so later reconnects bind to the same one */
			if (transport->xprt.reuseport)
				transport->srcport = port;
			break;
		}
		last = port;
		port = xs_next_srcport(transport, port);
		/* port > last means xs_next_srcport wrapped to the top
		 * of the range: count one completed pass */
		if (port > last)
			nloop++;
	} while (err == -EADDRINUSE && nloop != 2);

	if (myaddr.ss_family == AF_INET)
		dprintk("RPC:       %s %pI4:%u: %s (%d)\n", __func__,
				&((struct sockaddr_in *)&myaddr)->sin_addr,
				port, err ? "failed" : "ok", err);
	else
		dprintk("RPC:       %s %pI6:%u: %s (%d)\n", __func__,
				&((struct sockaddr_in6 *)&myaddr)->sin6_addr,
				port, err ? "failed" : "ok", err);
	return err;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) 
/*
 * We don't support autobind on AF_LOCAL sockets
 */
static void xs_local_rpcbind(struct rpc_task *task)
{
	/* mark the transport bound so the rpcbind step is skipped */
	xprt_set_bound(task->tk_xprt);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) 
/* AF_LOCAL endpoints are path-based: there is no port to set. */
static void xs_local_set_port(struct rpc_xprt *xprt, unsigned short port)
{
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) #ifdef CONFIG_DEBUG_LOCK_ALLOC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) static struct lock_class_key xs_key[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) static struct lock_class_key xs_slock_key[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) 
/* Give RPC AF_LOCAL sockets their own lockdep classes so their lock
 * usage is tracked separately from ordinary AF_LOCAL sockets. */
static inline void xs_reclassify_socketu(struct socket *sock)
{
	struct sock *sk = sock->sk;

	sock_lock_init_class_and_name(sk, "slock-AF_LOCAL-RPC",
		&xs_slock_key[1], "sk_lock-AF_LOCAL-RPC", &xs_key[1]);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) 
/* Give RPC IPv4 sockets their own lockdep classes so their lock usage
 * is tracked separately from ordinary AF_INET sockets. */
static inline void xs_reclassify_socket4(struct socket *sock)
{
	struct sock *sk = sock->sk;

	sock_lock_init_class_and_name(sk, "slock-AF_INET-RPC",
		&xs_slock_key[0], "sk_lock-AF_INET-RPC", &xs_key[0]);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) 
/* Give RPC IPv6 sockets their own lockdep classes so their lock usage
 * is tracked separately from ordinary AF_INET6 sockets. */
static inline void xs_reclassify_socket6(struct socket *sock)
{
	struct sock *sk = sock->sk;

	sock_lock_init_class_and_name(sk, "slock-AF_INET6-RPC",
		&xs_slock_key[1], "sk_lock-AF_INET6-RPC", &xs_key[1]);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) static inline void xs_reclassify_socket(int family, struct socket *sock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) 	if (WARN_ON_ONCE(!sock_allow_reclassification(sock->sk)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) 	switch (family) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) 	case AF_LOCAL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) 		xs_reclassify_socketu(sock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) 	case AF_INET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) 		xs_reclassify_socket4(sock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) 	case AF_INET6:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) 		xs_reclassify_socket6(sock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) static inline void xs_reclassify_socket(int family, struct socket *sock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) static void xs_dummy_setup_socket(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) static struct socket *xs_create_sock(struct rpc_xprt *xprt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) 		struct sock_xprt *transport, int family, int type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) 		int protocol, bool reuseport)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) 	struct file *filp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) 	struct socket *sock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) 	err = __sock_create(xprt->xprt_net, family, type, protocol, &sock, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) 	if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) 		dprintk("RPC:       can't create %d transport socket (%d).\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) 				protocol, -err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) 	xs_reclassify_socket(family, sock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) 	if (reuseport)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) 		sock_set_reuseport(sock->sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) 	err = xs_bind(transport, sock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) 	if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) 		sock_release(sock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) 	filp = sock_alloc_file(sock, O_NONBLOCK, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) 	if (IS_ERR(filp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) 		return ERR_CAST(filp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) 	transport->file = filp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) 	return sock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) 	return ERR_PTR(err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) static int xs_local_finish_connecting(struct rpc_xprt *xprt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) 				      struct socket *sock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) 	struct sock_xprt *transport = container_of(xprt, struct sock_xprt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) 									xprt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) 	if (!transport->inet) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) 		struct sock *sk = sock->sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) 		write_lock_bh(&sk->sk_callback_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) 		xs_save_old_callbacks(transport, sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) 		sk->sk_user_data = xprt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) 		sk->sk_data_ready = xs_data_ready;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) 		sk->sk_write_space = xs_udp_write_space;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) 		sock_set_flag(sk, SOCK_FASYNC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) 		sk->sk_error_report = xs_error_report;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) 		xprt_clear_connected(xprt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) 		/* Reset to new socket */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) 		transport->sock = sock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) 		transport->inet = sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) 		write_unlock_bh(&sk->sk_callback_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) 	xs_stream_start_connect(transport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) 	return kernel_connect(sock, xs_addr(xprt), xprt->addrlen, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847)  * xs_local_setup_socket - create AF_LOCAL socket, connect to a local endpoint
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848)  * @transport: socket transport to connect
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) static int xs_local_setup_socket(struct sock_xprt *transport)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) 	struct rpc_xprt *xprt = &transport->xprt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) 	struct file *filp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) 	struct socket *sock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) 	int status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) 	status = __sock_create(xprt->xprt_net, AF_LOCAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) 					SOCK_STREAM, 0, &sock, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) 	if (status < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) 		dprintk("RPC:       can't create AF_LOCAL "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) 			"transport socket (%d).\n", -status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) 	xs_reclassify_socket(AF_LOCAL, sock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) 	filp = sock_alloc_file(sock, O_NONBLOCK, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) 	if (IS_ERR(filp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) 		status = PTR_ERR(filp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) 	transport->file = filp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) 	dprintk("RPC:       worker connecting xprt %p via AF_LOCAL to %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) 			xprt, xprt->address_strings[RPC_DISPLAY_ADDR]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) 	status = xs_local_finish_connecting(xprt, sock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) 	trace_rpc_socket_connect(xprt, sock, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) 	switch (status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) 	case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) 		dprintk("RPC:       xprt %p connected to %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) 				xprt, xprt->address_strings[RPC_DISPLAY_ADDR]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) 		xprt->stat.connect_count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) 		xprt->stat.connect_time += (long)jiffies -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) 					   xprt->stat.connect_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) 		xprt_set_connected(xprt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) 	case -ENOBUFS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) 	case -ENOENT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) 		dprintk("RPC:       xprt %p: socket %s does not exist\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) 				xprt, xprt->address_strings[RPC_DISPLAY_ADDR]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) 	case -ECONNREFUSED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) 		dprintk("RPC:       xprt %p: connection refused for %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) 				xprt, xprt->address_strings[RPC_DISPLAY_ADDR]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) 		printk(KERN_ERR "%s: unhandled error (%d) connecting to %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) 				__func__, -status,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) 				xprt->address_strings[RPC_DISPLAY_ADDR]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) 	xprt_clear_connecting(xprt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) 	xprt_wake_pending_tasks(xprt, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) 	return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) static void xs_local_connect(struct rpc_xprt *xprt, struct rpc_task *task)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) 	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) 	 if (RPC_IS_ASYNC(task)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) 		 * We want the AF_LOCAL connect to be resolved in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) 		 * filesystem namespace of the process making the rpc
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) 		 * call.  Thus we connect synchronously.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) 		 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) 		 * If we want to support asynchronous AF_LOCAL calls,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) 		 * we'll need to figure out how to pass a namespace to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) 		 * connect.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) 		task->tk_rpc_status = -ENOTCONN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) 		rpc_exit(task, -ENOTCONN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) 	ret = xs_local_setup_socket(transport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) 	if (ret && !RPC_IS_SOFTCONN(task))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) 		msleep_interruptible(15000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) #if IS_ENABLED(CONFIG_SUNRPC_SWAP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934)  * Note that this should be called with XPRT_LOCKED held (or when we otherwise
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935)  * know that we have exclusive access to the socket), to guard against
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936)  * races with xs_reset_transport.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) static void xs_set_memalloc(struct rpc_xprt *xprt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) 	struct sock_xprt *transport = container_of(xprt, struct sock_xprt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) 			xprt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) 	 * If there's no sock, then we have nothing to set. The
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) 	 * reconnecting process will get it for us.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) 	if (!transport->inet)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) 	if (atomic_read(&xprt->swapper))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) 		sk_set_memalloc(transport->inet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954)  * xs_enable_swap - Tag this transport as being used for swap.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955)  * @xprt: transport to tag
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957)  * Take a reference to this transport on behalf of the rpc_clnt, and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958)  * optionally mark it for swapping if it wasn't already.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) xs_enable_swap(struct rpc_xprt *xprt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) 	struct sock_xprt *xs = container_of(xprt, struct sock_xprt, xprt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) 	if (atomic_inc_return(&xprt->swapper) != 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) 	if (wait_on_bit_lock(&xprt->state, XPRT_LOCKED, TASK_KILLABLE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) 		return -ERESTARTSYS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) 	if (xs->inet)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) 		sk_set_memalloc(xs->inet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) 	xprt_release_xprt(xprt, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976)  * xs_disable_swap - Untag this transport as being used for swap.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977)  * @xprt: transport to tag
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979)  * Drop a "swapper" reference to this xprt on behalf of the rpc_clnt. If the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980)  * swapper refcount goes to 0, untag the socket as a memalloc socket.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) xs_disable_swap(struct rpc_xprt *xprt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) 	struct sock_xprt *xs = container_of(xprt, struct sock_xprt, xprt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) 	if (!atomic_dec_and_test(&xprt->swapper))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) 	if (wait_on_bit_lock(&xprt->state, XPRT_LOCKED, TASK_KILLABLE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) 	if (xs->inet)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) 		sk_clear_memalloc(xs->inet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) 	xprt_release_xprt(xprt, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) static void xs_set_memalloc(struct rpc_xprt *xprt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) xs_enable_swap(struct rpc_xprt *xprt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) 	return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) xs_disable_swap(struct rpc_xprt *xprt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) static void xs_udp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) 	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) 	if (!transport->inet) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) 		struct sock *sk = sock->sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) 		write_lock_bh(&sk->sk_callback_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) 		xs_save_old_callbacks(transport, sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) 		sk->sk_user_data = xprt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) 		sk->sk_data_ready = xs_data_ready;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) 		sk->sk_write_space = xs_udp_write_space;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) 		sock_set_flag(sk, SOCK_FASYNC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) 		xprt_set_connected(xprt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) 		/* Reset to new socket */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) 		transport->sock = sock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) 		transport->inet = sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) 		xs_set_memalloc(xprt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) 		write_unlock_bh(&sk->sk_callback_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) 	xs_udp_do_set_buffer_size(xprt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) 	xprt->stat.connect_start = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) static void xs_udp_setup_socket(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) 	struct sock_xprt *transport =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) 		container_of(work, struct sock_xprt, connect_worker.work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) 	struct rpc_xprt *xprt = &transport->xprt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) 	struct socket *sock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) 	int status = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) 	sock = xs_create_sock(xprt, transport,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) 			xs_addr(xprt)->sa_family, SOCK_DGRAM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) 			IPPROTO_UDP, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) 	if (IS_ERR(sock))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) 	dprintk("RPC:       worker connecting xprt %p via %s to "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) 				"%s (port %s)\n", xprt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) 			xprt->address_strings[RPC_DISPLAY_PROTO],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) 			xprt->address_strings[RPC_DISPLAY_ADDR],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) 			xprt->address_strings[RPC_DISPLAY_PORT]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) 	xs_udp_finish_connecting(xprt, sock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) 	trace_rpc_socket_connect(xprt, sock, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) 	status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) 	xprt_clear_connecting(xprt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) 	xprt_unlock_connect(xprt, transport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) 	xprt_wake_pending_tasks(xprt, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073)  * xs_tcp_shutdown - gracefully shut down a TCP socket
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074)  * @xprt: transport
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076)  * Initiates a graceful shutdown of the TCP socket by calling the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077)  * equivalent of shutdown(SHUT_RDWR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) static void xs_tcp_shutdown(struct rpc_xprt *xprt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) 	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) 	struct socket *sock = transport->sock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) 	int skst = transport->inet ? transport->inet->sk_state : TCP_CLOSE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) 	if (sock == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) 	switch (skst) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) 		kernel_sock_shutdown(sock, SHUT_RDWR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) 		trace_rpc_socket_shutdown(xprt, sock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) 	case TCP_CLOSE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) 	case TCP_TIME_WAIT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) 		xs_reset_transport(transport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) static void xs_tcp_set_socket_timeouts(struct rpc_xprt *xprt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) 		struct socket *sock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) 	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) 	unsigned int keepidle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) 	unsigned int keepcnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) 	unsigned int timeo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) 	spin_lock(&xprt->transport_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) 	keepidle = DIV_ROUND_UP(xprt->timeout->to_initval, HZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) 	keepcnt = xprt->timeout->to_retries + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) 	timeo = jiffies_to_msecs(xprt->timeout->to_initval) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) 		(xprt->timeout->to_retries + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) 	clear_bit(XPRT_SOCK_UPD_TIMEOUT, &transport->sock_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) 	spin_unlock(&xprt->transport_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) 	/* TCP Keepalive options */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) 	sock_set_keepalive(sock->sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) 	tcp_sock_set_keepidle(sock->sk, keepidle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) 	tcp_sock_set_keepintvl(sock->sk, keepidle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) 	tcp_sock_set_keepcnt(sock->sk, keepcnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) 	/* TCP user timeout (see RFC5482) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) 	tcp_sock_set_user_timeout(sock->sk, timeo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) static void xs_tcp_set_connect_timeout(struct rpc_xprt *xprt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) 		unsigned long connect_timeout,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) 		unsigned long reconnect_timeout)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) 	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) 	struct rpc_timeout to;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) 	unsigned long initval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) 	spin_lock(&xprt->transport_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) 	if (reconnect_timeout < xprt->max_reconnect_timeout)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) 		xprt->max_reconnect_timeout = reconnect_timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) 	if (connect_timeout < xprt->connect_timeout) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) 		memcpy(&to, xprt->timeout, sizeof(to));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) 		initval = DIV_ROUND_UP(connect_timeout, to.to_retries + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) 		/* Arbitrary lower limit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) 		if (initval <  XS_TCP_INIT_REEST_TO << 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) 			initval = XS_TCP_INIT_REEST_TO << 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) 		to.to_initval = initval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) 		to.to_maxval = initval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) 		memcpy(&transport->tcp_timeout, &to,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) 				sizeof(transport->tcp_timeout));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) 		xprt->timeout = &transport->tcp_timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) 		xprt->connect_timeout = connect_timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) 	set_bit(XPRT_SOCK_UPD_TIMEOUT, &transport->sock_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) 	spin_unlock(&xprt->transport_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) 	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) 	int ret = -ENOTCONN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) 	if (!transport->inet) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) 		struct sock *sk = sock->sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) 		/* Avoid temporary address, they are bad for long-lived
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) 		 * connections such as NFS mounts.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) 		 * RFC4941, section 3.6 suggests that:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) 		 *    Individual applications, which have specific
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) 		 *    knowledge about the normal duration of connections,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) 		 *    MAY override this as appropriate.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) 		if (xs_addr(xprt)->sa_family == PF_INET6) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) 			ip6_sock_set_addr_preferences(sk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) 				IPV6_PREFER_SRC_PUBLIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) 		xs_tcp_set_socket_timeouts(xprt, sock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) 		write_lock_bh(&sk->sk_callback_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) 		xs_save_old_callbacks(transport, sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) 		sk->sk_user_data = xprt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) 		sk->sk_data_ready = xs_data_ready;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) 		sk->sk_state_change = xs_tcp_state_change;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) 		sk->sk_write_space = xs_tcp_write_space;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) 		sock_set_flag(sk, SOCK_FASYNC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) 		sk->sk_error_report = xs_error_report;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) 		/* socket options */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) 		sock_reset_flag(sk, SOCK_LINGER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) 		tcp_sk(sk)->nonagle |= TCP_NAGLE_OFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) 		xprt_clear_connected(xprt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) 		/* Reset to new socket */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) 		transport->sock = sock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) 		transport->inet = sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) 		write_unlock_bh(&sk->sk_callback_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) 	if (!xprt_bound(xprt))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) 	xs_set_memalloc(xprt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) 	xs_stream_start_connect(transport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) 	/* Tell the socket layer to start connecting... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) 	set_bit(XPRT_SOCK_CONNECTING, &transport->sock_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) 	ret = kernel_connect(sock, xs_addr(xprt), xprt->addrlen, O_NONBLOCK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) 	switch (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) 	case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) 		xs_set_srcport(transport, sock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) 		fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) 	case -EINPROGRESS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) 		/* SYN_SENT! */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) 		if (xprt->reestablish_timeout < XS_TCP_INIT_REEST_TO)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) 			xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) 	case -EADDRNOTAVAIL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) 		/* Source port number is unavailable. Try a new one! */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) 		transport->srcport = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226)  * xs_tcp_setup_socket - create a TCP socket and connect to a remote endpoint
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227)  * @work: queued work item
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229)  * Invoked by a work queue tasklet.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) static void xs_tcp_setup_socket(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) 	struct sock_xprt *transport =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) 		container_of(work, struct sock_xprt, connect_worker.work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) 	struct socket *sock = transport->sock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) 	struct rpc_xprt *xprt = &transport->xprt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) 	int status = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) 	if (!sock) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) 		sock = xs_create_sock(xprt, transport,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) 				xs_addr(xprt)->sa_family, SOCK_STREAM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) 				IPPROTO_TCP, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) 		if (IS_ERR(sock)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) 			status = PTR_ERR(sock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) 	dprintk("RPC:       worker connecting xprt %p via %s to "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) 				"%s (port %s)\n", xprt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) 			xprt->address_strings[RPC_DISPLAY_PROTO],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) 			xprt->address_strings[RPC_DISPLAY_ADDR],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) 			xprt->address_strings[RPC_DISPLAY_PORT]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) 	status = xs_tcp_finish_connecting(xprt, sock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) 	trace_rpc_socket_connect(xprt, sock, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) 	dprintk("RPC:       %p connect status %d connected %d sock state %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) 			xprt, -status, xprt_connected(xprt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) 			sock->sk->sk_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) 	switch (status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) 		printk("%s: connect returned unhandled error %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) 			__func__, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) 		fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) 	case -EADDRNOTAVAIL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) 		/* We're probably in TIME_WAIT. Get rid of existing socket,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) 		 * and retry
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) 		xs_tcp_force_close(xprt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) 	case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) 	case -EINPROGRESS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) 	case -EALREADY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) 		xprt_unlock_connect(xprt, transport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) 	case -EINVAL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) 		/* Happens, for instance, if the user specified a link
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) 		 * local IPv6 address without a scope-id.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) 	case -ECONNREFUSED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) 	case -ECONNRESET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) 	case -ENETDOWN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) 	case -ENETUNREACH:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) 	case -EHOSTUNREACH:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) 	case -EADDRINUSE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) 	case -ENOBUFS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) 		 * xs_tcp_force_close() wakes tasks with -EIO.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) 		 * We need to wake them first to ensure the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) 		 * correct error code.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) 		xprt_wake_pending_tasks(xprt, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) 		xs_tcp_force_close(xprt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) 	status = -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) 	xprt_clear_connecting(xprt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) 	xprt_unlock_connect(xprt, transport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) 	xprt_wake_pending_tasks(xprt, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304)  * xs_connect - connect a socket to a remote endpoint
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305)  * @xprt: pointer to transport structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306)  * @task: address of RPC task that manages state of connect request
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308)  * TCP: If the remote end dropped the connection, delay reconnecting.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310)  * UDP socket connects are synchronous, but we use a work queue anyway
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311)  * to guarantee that even unprivileged user processes can set up a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312)  * socket on a privileged port.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314)  * If a UDP socket connect fails, the delay behavior here prevents
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315)  * retry floods (hard mounts).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) static void xs_connect(struct rpc_xprt *xprt, struct rpc_task *task)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) 	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) 	unsigned long delay = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) 	WARN_ON_ONCE(!xprt_lock_connect(xprt, task, transport));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) 	if (transport->sock != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) 		dprintk("RPC:       xs_connect delayed xprt %p for %lu "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) 				"seconds\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) 				xprt, xprt->reestablish_timeout / HZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) 		/* Start by resetting any existing state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) 		xs_reset_transport(transport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) 		delay = xprt_reconnect_delay(xprt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) 		xprt_reconnect_backoff(xprt, XS_TCP_INIT_REEST_TO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) 	} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) 		dprintk("RPC:       xs_connect scheduled xprt %p\n", xprt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) 	queue_delayed_work(xprtiod_workqueue,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) 			&transport->connect_worker,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) 			delay);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) static void xs_wake_disconnect(struct sock_xprt *transport)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) 	if (test_and_clear_bit(XPRT_SOCK_WAKE_DISCONNECT, &transport->sock_state))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) 		xs_tcp_force_close(&transport->xprt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) static void xs_wake_write(struct sock_xprt *transport)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) 	if (test_and_clear_bit(XPRT_SOCK_WAKE_WRITE, &transport->sock_state))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) 		xprt_write_space(&transport->xprt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) static void xs_wake_error(struct sock_xprt *transport)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) 	int sockerr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) 	if (!test_bit(XPRT_SOCK_WAKE_ERROR, &transport->sock_state))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) 	mutex_lock(&transport->recv_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) 	if (transport->sock == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) 	if (!test_and_clear_bit(XPRT_SOCK_WAKE_ERROR, &transport->sock_state))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) 	sockerr = xchg(&transport->xprt_err, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) 	if (sockerr < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) 		xprt_wake_pending_tasks(&transport->xprt, sockerr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) 	mutex_unlock(&transport->recv_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) static void xs_wake_pending(struct sock_xprt *transport)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) 	if (test_and_clear_bit(XPRT_SOCK_WAKE_PENDING, &transport->sock_state))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) 		xprt_wake_pending_tasks(&transport->xprt, -EAGAIN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) static void xs_error_handle(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) 	struct sock_xprt *transport = container_of(work,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) 			struct sock_xprt, error_worker);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) 	xs_wake_disconnect(transport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) 	xs_wake_write(transport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) 	xs_wake_error(transport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) 	xs_wake_pending(transport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391)  * xs_local_print_stats - display AF_LOCAL socket-specifc stats
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392)  * @xprt: rpc_xprt struct containing statistics
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393)  * @seq: output file
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) static void xs_local_print_stats(struct rpc_xprt *xprt, struct seq_file *seq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) 	long idle_time = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) 	if (xprt_connected(xprt))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) 		idle_time = (long)(jiffies - xprt->last_used) / HZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) 	seq_printf(seq, "\txprt:\tlocal %lu %lu %lu %ld %lu %lu %lu "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) 			"%llu %llu %lu %llu %llu\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) 			xprt->stat.bind_count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) 			xprt->stat.connect_count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) 			xprt->stat.connect_time / HZ,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) 			idle_time,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) 			xprt->stat.sends,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) 			xprt->stat.recvs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) 			xprt->stat.bad_xids,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) 			xprt->stat.req_u,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) 			xprt->stat.bklog_u,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) 			xprt->stat.max_slots,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) 			xprt->stat.sending_u,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) 			xprt->stat.pending_u);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420)  * xs_udp_print_stats - display UDP socket-specifc stats
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421)  * @xprt: rpc_xprt struct containing statistics
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422)  * @seq: output file
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) static void xs_udp_print_stats(struct rpc_xprt *xprt, struct seq_file *seq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) 	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) 	seq_printf(seq, "\txprt:\tudp %u %lu %lu %lu %lu %llu %llu "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) 			"%lu %llu %llu\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) 			transport->srcport,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) 			xprt->stat.bind_count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) 			xprt->stat.sends,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) 			xprt->stat.recvs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) 			xprt->stat.bad_xids,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) 			xprt->stat.req_u,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437) 			xprt->stat.bklog_u,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) 			xprt->stat.max_slots,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) 			xprt->stat.sending_u,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) 			xprt->stat.pending_u);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444)  * xs_tcp_print_stats - display TCP socket-specifc stats
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445)  * @xprt: rpc_xprt struct containing statistics
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446)  * @seq: output file
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) static void xs_tcp_print_stats(struct rpc_xprt *xprt, struct seq_file *seq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) 	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) 	long idle_time = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) 	if (xprt_connected(xprt))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) 		idle_time = (long)(jiffies - xprt->last_used) / HZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) 	seq_printf(seq, "\txprt:\ttcp %u %lu %lu %lu %ld %lu %lu %lu "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) 			"%llu %llu %lu %llu %llu\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) 			transport->srcport,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) 			xprt->stat.bind_count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) 			xprt->stat.connect_count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) 			xprt->stat.connect_time / HZ,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) 			idle_time,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) 			xprt->stat.sends,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) 			xprt->stat.recvs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) 			xprt->stat.bad_xids,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) 			xprt->stat.req_u,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) 			xprt->stat.bklog_u,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) 			xprt->stat.max_slots,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) 			xprt->stat.sending_u,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) 			xprt->stat.pending_u);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475)  * Allocate a bunch of pages for a scratch buffer for the rpc code. The reason
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476)  * we allocate pages instead doing a kmalloc like rpc_malloc is because we want
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477)  * to use the server side send routines.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) static int bc_malloc(struct rpc_task *task)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) 	struct rpc_rqst *rqst = task->tk_rqstp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) 	size_t size = rqst->rq_callsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483) 	struct page *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) 	struct rpc_buffer *buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) 	if (size > PAGE_SIZE - sizeof(struct rpc_buffer)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) 		WARN_ONCE(1, "xprtsock: large bc buffer request (size %zu)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) 			  size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) 	page = alloc_page(GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) 	if (!page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) 	buf = page_address(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) 	buf->len = PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) 	rqst->rq_buffer = buf->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) 	rqst->rq_rbuffer = (char *)rqst->rq_buffer + rqst->rq_callsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505)  * Free the space allocated in the bc_alloc routine
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) static void bc_free(struct rpc_task *task)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509) 	void *buffer = task->tk_rqstp->rq_buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) 	struct rpc_buffer *buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) 	buf = container_of(buffer, struct rpc_buffer, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) 	free_page((unsigned long)buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) static int bc_sendto(struct rpc_rqst *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) 	struct xdr_buf *xdr = &req->rq_snd_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) 	struct sock_xprt *transport =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) 			container_of(req->rq_xprt, struct sock_xprt, xprt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) 	struct msghdr msg = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) 		.msg_flags	= 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524) 	rpc_fraghdr marker = cpu_to_be32(RPC_LAST_STREAM_FRAGMENT |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) 					 (u32)xdr->len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) 	unsigned int sent = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) 	req->rq_xtime = ktime_get();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) 	err = xprt_sock_sendmsg(transport->sock, &msg, xdr, 0, marker, &sent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) 	xdr_free_bvec(xdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532) 	if (err < 0 || sent != (xdr->len + sizeof(marker)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) 		return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) 	return sent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538)  * bc_send_request - Send a backchannel Call on a TCP socket
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539)  * @req: rpc_rqst containing Call message to be sent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541)  * xpt_mutex ensures @rqstp's whole message is written to the socket
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542)  * without interruption.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544)  * Return values:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545)  *   %0 if the message was sent successfully
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546)  *   %ENOTCONN if the message was not sent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) static int bc_send_request(struct rpc_rqst *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) 	struct svc_xprt	*xprt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) 	int len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) 	 * Get the server socket associated with this callback xprt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) 	xprt = req->rq_xprt->bc_xprt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559) 	 * Grab the mutex to serialize data as the connection is shared
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) 	 * with the fore channel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) 	mutex_lock(&xprt->xpt_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) 	if (test_bit(XPT_DEAD, &xprt->xpt_flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564) 		len = -ENOTCONN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566) 		len = bc_sendto(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) 	mutex_unlock(&xprt->xpt_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) 	if (len > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570) 		len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572) 	return len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576)  * The close routine. Since this is client initiated, we do nothing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) static void bc_close(struct rpc_xprt *xprt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) 	xprt_disconnect_done(xprt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585)  * The xprt destroy routine. Again, because this connection is client
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586)  * initiated, we do nothing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) static void bc_destroy(struct rpc_xprt *xprt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) 	dprintk("RPC:       bc_destroy xprt %p\n", xprt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) 	xs_xprt_free(xprt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594) 	module_put(THIS_MODULE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597) static const struct rpc_xprt_ops xs_local_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) 	.reserve_xprt		= xprt_reserve_xprt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) 	.release_xprt		= xprt_release_xprt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) 	.alloc_slot		= xprt_alloc_slot,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601) 	.free_slot		= xprt_free_slot,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602) 	.rpcbind		= xs_local_rpcbind,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) 	.set_port		= xs_local_set_port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604) 	.connect		= xs_local_connect,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) 	.buf_alloc		= rpc_malloc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606) 	.buf_free		= rpc_free,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607) 	.prepare_request	= xs_stream_prepare_request,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608) 	.send_request		= xs_local_send_request,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609) 	.wait_for_reply_request	= xprt_wait_for_reply_request_def,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610) 	.close			= xs_close,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611) 	.destroy		= xs_destroy,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612) 	.print_stats		= xs_local_print_stats,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613) 	.enable_swap		= xs_enable_swap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614) 	.disable_swap		= xs_disable_swap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617) static const struct rpc_xprt_ops xs_udp_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618) 	.set_buffer_size	= xs_udp_set_buffer_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619) 	.reserve_xprt		= xprt_reserve_xprt_cong,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620) 	.release_xprt		= xprt_release_xprt_cong,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621) 	.alloc_slot		= xprt_alloc_slot,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622) 	.free_slot		= xprt_free_slot,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623) 	.rpcbind		= rpcb_getport_async,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624) 	.set_port		= xs_set_port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625) 	.connect		= xs_connect,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626) 	.buf_alloc		= rpc_malloc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627) 	.buf_free		= rpc_free,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628) 	.send_request		= xs_udp_send_request,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629) 	.wait_for_reply_request	= xprt_wait_for_reply_request_rtt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630) 	.timer			= xs_udp_timer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631) 	.release_request	= xprt_release_rqst_cong,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632) 	.close			= xs_close,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633) 	.destroy		= xs_destroy,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634) 	.print_stats		= xs_udp_print_stats,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635) 	.enable_swap		= xs_enable_swap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636) 	.disable_swap		= xs_disable_swap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637) 	.inject_disconnect	= xs_inject_disconnect,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639) 
/*
 * RPC transport methods for TCP sockets.
 *
 * Stream transport: .prepare_request handles per-request stream setup
 * and .close performs an orderly shutdown (xs_tcp_shutdown) rather
 * than the immediate xs_close used by the UDP table.  The bc_* entries,
 * compiled in only under CONFIG_SUNRPC_BACKCHANNEL, provide server
 * backchannel support over the client's existing connection.
 */
static const struct rpc_xprt_ops xs_tcp_ops = {
	.reserve_xprt		= xprt_reserve_xprt,
	.release_xprt		= xprt_release_xprt,
	.alloc_slot		= xprt_alloc_slot,
	.free_slot		= xprt_free_slot,
	.rpcbind		= rpcb_getport_async,
	.set_port		= xs_set_port,
	.connect		= xs_connect,
	.buf_alloc		= rpc_malloc,
	.buf_free		= rpc_free,
	.prepare_request	= xs_stream_prepare_request,
	.send_request		= xs_tcp_send_request,
	.wait_for_reply_request	= xprt_wait_for_reply_request_def,
	.close			= xs_tcp_shutdown,
	.destroy		= xs_destroy,
	.set_connect_timeout	= xs_tcp_set_connect_timeout,
	.print_stats		= xs_tcp_print_stats,
	.enable_swap		= xs_enable_swap,
	.disable_swap		= xs_disable_swap,
	.inject_disconnect	= xs_inject_disconnect,
#ifdef CONFIG_SUNRPC_BACKCHANNEL
	.bc_setup		= xprt_setup_bc,
	.bc_maxpayload		= xs_tcp_bc_maxpayload,
	.bc_num_slots		= xprt_bc_max_slots,
	.bc_free_rqst		= xprt_free_bc_rqst,
	.bc_destroy		= xprt_destroy_bc,
#endif
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668) 
/*
 * The rpc_xprt_ops for the server backchannel.
 *
 * The underlying TCP connection is owned by the svc (server) side, so
 * there are no .connect/.rpcbind/.set_port entries here; this table
 * only sends replies over the existing socket (bc_send_request) and
 * uses the backchannel buffer allocators (bc_malloc/bc_free) instead
 * of the generic rpc_malloc/rpc_free.
 */

static const struct rpc_xprt_ops bc_tcp_ops = {
	.reserve_xprt		= xprt_reserve_xprt,
	.release_xprt		= xprt_release_xprt,
	.alloc_slot		= xprt_alloc_slot,
	.free_slot		= xprt_free_slot,
	.buf_alloc		= bc_malloc,
	.buf_free		= bc_free,
	.send_request		= bc_send_request,
	.wait_for_reply_request	= xprt_wait_for_reply_request_def,
	.close			= bc_close,
	.destroy		= bc_destroy,
	.print_stats		= xs_tcp_print_stats,
	.enable_swap		= xs_enable_swap,
	.disable_swap		= xs_disable_swap,
	.inject_disconnect	= xs_inject_disconnect,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690) static int xs_init_anyaddr(const int family, struct sockaddr *sap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692) 	static const struct sockaddr_in sin = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693) 		.sin_family		= AF_INET,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694) 		.sin_addr.s_addr	= htonl(INADDR_ANY),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696) 	static const struct sockaddr_in6 sin6 = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697) 		.sin6_family		= AF_INET6,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698) 		.sin6_addr		= IN6ADDR_ANY_INIT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701) 	switch (family) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702) 	case AF_LOCAL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704) 	case AF_INET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705) 		memcpy(sap, &sin, sizeof(sin));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707) 	case AF_INET6:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708) 		memcpy(sap, &sin6, sizeof(sin6));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711) 		dprintk("RPC:       %s: Bad address family\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712) 		return -EAFNOSUPPORT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717) static struct rpc_xprt *xs_setup_xprt(struct xprt_create *args,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718) 				      unsigned int slot_table_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719) 				      unsigned int max_slot_table_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721) 	struct rpc_xprt *xprt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722) 	struct sock_xprt *new;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724) 	if (args->addrlen > sizeof(xprt->addr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725) 		dprintk("RPC:       xs_setup_xprt: address too large\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726) 		return ERR_PTR(-EBADF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729) 	xprt = xprt_alloc(args->net, sizeof(*new), slot_table_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730) 			max_slot_table_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731) 	if (xprt == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732) 		dprintk("RPC:       xs_setup_xprt: couldn't allocate "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733) 				"rpc_xprt\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734) 		return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737) 	new = container_of(xprt, struct sock_xprt, xprt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738) 	mutex_init(&new->recv_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739) 	memcpy(&xprt->addr, args->dstaddr, args->addrlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740) 	xprt->addrlen = args->addrlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741) 	if (args->srcaddr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742) 		memcpy(&new->srcaddr, args->srcaddr, args->addrlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743) 	else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744) 		int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745) 		err = xs_init_anyaddr(args->dstaddr->sa_family,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746) 					(struct sockaddr *)&new->srcaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747) 		if (err != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748) 			xprt_free(xprt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749) 			return ERR_PTR(err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753) 	return xprt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755) 
/*
 * Default RPC timeout for AF_LOCAL transports: a fixed 10 second major
 * timeout (initval == maxval, so no backoff) with 2 retries.  Fields
 * not listed here are implicitly zero.
 */
static const struct rpc_timeout xs_local_default_timeout = {
	.to_initval = 10 * HZ,
	.to_maxval = 10 * HZ,
	.to_retries = 2,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761) 
/**
 * xs_setup_local - Set up transport to use an AF_LOCAL socket
 * @args: rpc transport creation arguments
 *
 * AF_LOCAL is a "tpi_cots_ord" transport, just like TCP
 *
 * Return: a new rpc_xprt on success, otherwise an ERR_PTR.
 */
static struct rpc_xprt *xs_setup_local(struct xprt_create *args)
{
	struct sockaddr_un *sun = (struct sockaddr_un *)args->dstaddr;
	struct sock_xprt *transport;
	struct rpc_xprt *xprt;
	struct rpc_xprt *ret;

	/* AF_LOCAL borrows the TCP slot table sizing */
	xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries,
			xprt_max_tcp_slot_table_entries);
	if (IS_ERR(xprt))
		return xprt;
	transport = container_of(xprt, struct sock_xprt, xprt);

	xprt->prot = 0;
	xprt->max_payload = RPC_MAX_FRAGMENT_SIZE;

	xprt->bind_timeout = XS_BIND_TO;
	xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
	xprt->idle_timeout = XS_IDLE_DISC_TO;

	xprt->ops = &xs_local_ops;
	xprt->timeout = &xs_local_default_timeout;

	INIT_WORK(&transport->recv_worker, xs_stream_data_receive_workfn);
	INIT_WORK(&transport->error_worker, xs_error_handle);
	/* The socket is set up synchronously below, so the deferred
	 * connect worker is a no-op stub */
	INIT_DELAYED_WORK(&transport->connect_worker, xs_dummy_setup_socket);

	switch (sun->sun_family) {
	case AF_LOCAL:
		/* Only absolute socket pathnames are accepted */
		if (sun->sun_path[0] != '/') {
			dprintk("RPC:       bad AF_LOCAL address: %s\n",
					sun->sun_path);
			ret = ERR_PTR(-EINVAL);
			goto out_err;
		}
		xprt_set_bound(xprt);
		xs_format_peer_addresses(xprt, "local", RPCBIND_NETID_LOCAL);
		/* ERR_PTR(0) is NULL, so ret is non-NULL — and taken as
		 * an error below — only when the socket setup failed */
		ret = ERR_PTR(xs_local_setup_socket(transport));
		if (ret)
			goto out_err;
		break;
	default:
		ret = ERR_PTR(-EAFNOSUPPORT);
		goto out_err;
	}

	dprintk("RPC:       set up xprt to %s via AF_LOCAL\n",
			xprt->address_strings[RPC_DISPLAY_ADDR]);

	/* Pin this module for the lifetime of the transport */
	if (try_module_get(THIS_MODULE))
		return xprt;
	ret = ERR_PTR(-EINVAL);
out_err:
	xs_xprt_free(xprt);
	return ret;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2824) 
/*
 * Default RPC timeout for UDP transports: start at 5 seconds, back off
 * linearly in 5 second increments up to a 30 second maximum, with 5
 * retries per major timeout.
 */
static const struct rpc_timeout xs_udp_default_timeout = {
	.to_initval = 5 * HZ,
	.to_maxval = 30 * HZ,
	.to_increment = 5 * HZ,
	.to_retries = 5,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2831) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2832) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2833)  * xs_setup_udp - Set up transport to use a UDP socket
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2834)  * @args: rpc transport creation arguments
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2835)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2836)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2837) static struct rpc_xprt *xs_setup_udp(struct xprt_create *args)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2838) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2839) 	struct sockaddr *addr = args->dstaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2840) 	struct rpc_xprt *xprt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2841) 	struct sock_xprt *transport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2842) 	struct rpc_xprt *ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2843) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2844) 	xprt = xs_setup_xprt(args, xprt_udp_slot_table_entries,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2845) 			xprt_udp_slot_table_entries);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2846) 	if (IS_ERR(xprt))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2847) 		return xprt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2848) 	transport = container_of(xprt, struct sock_xprt, xprt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2849) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2850) 	xprt->prot = IPPROTO_UDP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2851) 	/* XXX: header size can vary due to auth type, IPv6, etc. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2852) 	xprt->max_payload = (1U << 16) - (MAX_HEADER << 3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2853) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2854) 	xprt->bind_timeout = XS_BIND_TO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2855) 	xprt->reestablish_timeout = XS_UDP_REEST_TO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2856) 	xprt->idle_timeout = XS_IDLE_DISC_TO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2857) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2858) 	xprt->ops = &xs_udp_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2859) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2860) 	xprt->timeout = &xs_udp_default_timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2861) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2862) 	INIT_WORK(&transport->recv_worker, xs_udp_data_receive_workfn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2863) 	INIT_WORK(&transport->error_worker, xs_error_handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2864) 	INIT_DELAYED_WORK(&transport->connect_worker, xs_udp_setup_socket);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2865) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2866) 	switch (addr->sa_family) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2867) 	case AF_INET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2868) 		if (((struct sockaddr_in *)addr)->sin_port != htons(0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2869) 			xprt_set_bound(xprt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2870) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2871) 		xs_format_peer_addresses(xprt, "udp", RPCBIND_NETID_UDP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2872) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2873) 	case AF_INET6:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2874) 		if (((struct sockaddr_in6 *)addr)->sin6_port != htons(0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2875) 			xprt_set_bound(xprt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2876) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2877) 		xs_format_peer_addresses(xprt, "udp", RPCBIND_NETID_UDP6);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2878) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2879) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2880) 		ret = ERR_PTR(-EAFNOSUPPORT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2881) 		goto out_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2882) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2883) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2884) 	if (xprt_bound(xprt))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2885) 		dprintk("RPC:       set up xprt to %s (port %s) via %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2886) 				xprt->address_strings[RPC_DISPLAY_ADDR],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2887) 				xprt->address_strings[RPC_DISPLAY_PORT],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2888) 				xprt->address_strings[RPC_DISPLAY_PROTO]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2889) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2890) 		dprintk("RPC:       set up xprt to %s (autobind) via %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2891) 				xprt->address_strings[RPC_DISPLAY_ADDR],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2892) 				xprt->address_strings[RPC_DISPLAY_PROTO]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2893) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2894) 	if (try_module_get(THIS_MODULE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2895) 		return xprt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2896) 	ret = ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2897) out_err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2898) 	xs_xprt_free(xprt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2899) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2900) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2901) 
/*
 * Default RPC timeout for TCP transports: a fixed 60 second major
 * timeout (initval == maxval, so no backoff) with 2 retries.
 */
static const struct rpc_timeout xs_tcp_default_timeout = {
	.to_initval = 60 * HZ,
	.to_maxval = 60 * HZ,
	.to_retries = 2,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2907) 
/**
 * xs_setup_tcp - Set up transport to use a TCP socket
 * @args: rpc transport creation arguments
 *
 * Return: a new rpc_xprt on success, otherwise an ERR_PTR.
 */
static struct rpc_xprt *xs_setup_tcp(struct xprt_create *args)
{
	struct sockaddr *addr = args->dstaddr;
	struct rpc_xprt *xprt;
	struct sock_xprt *transport;
	struct rpc_xprt *ret;
	unsigned int max_slot_table_size = xprt_max_tcp_slot_table_entries;

	/* Callers may request a slot table that can grow dynamically up
	 * to the hard limit instead of the sysctl-configured maximum */
	if (args->flags & XPRT_CREATE_INFINITE_SLOTS)
		max_slot_table_size = RPC_MAX_SLOT_TABLE_LIMIT;

	xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries,
			max_slot_table_size);
	if (IS_ERR(xprt))
		return xprt;
	transport = container_of(xprt, struct sock_xprt, xprt);

	xprt->prot = IPPROTO_TCP;
	xprt->max_payload = RPC_MAX_FRAGMENT_SIZE;

	xprt->bind_timeout = XS_BIND_TO;
	xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
	xprt->idle_timeout = XS_IDLE_DISC_TO;

	xprt->ops = &xs_tcp_ops;
	xprt->timeout = &xs_tcp_default_timeout;

	xprt->max_reconnect_timeout = xprt->timeout->to_maxval;
	/* Overall connect budget: one initval-length attempt for the
	 * initial try plus each retry */
	xprt->connect_timeout = xprt->timeout->to_initval *
		(xprt->timeout->to_retries + 1);

	INIT_WORK(&transport->recv_worker, xs_stream_data_receive_workfn);
	INIT_WORK(&transport->error_worker, xs_error_handle);
	INIT_DELAYED_WORK(&transport->connect_worker, xs_tcp_setup_socket);

	switch (addr->sa_family) {
	case AF_INET:
		/* A non-zero destination port marks the transport bound */
		if (((struct sockaddr_in *)addr)->sin_port != htons(0))
			xprt_set_bound(xprt);

		xs_format_peer_addresses(xprt, "tcp", RPCBIND_NETID_TCP);
		break;
	case AF_INET6:
		if (((struct sockaddr_in6 *)addr)->sin6_port != htons(0))
			xprt_set_bound(xprt);

		xs_format_peer_addresses(xprt, "tcp", RPCBIND_NETID_TCP6);
		break;
	default:
		ret = ERR_PTR(-EAFNOSUPPORT);
		goto out_err;
	}

	if (xprt_bound(xprt))
		dprintk("RPC:       set up xprt to %s (port %s) via %s\n",
				xprt->address_strings[RPC_DISPLAY_ADDR],
				xprt->address_strings[RPC_DISPLAY_PORT],
				xprt->address_strings[RPC_DISPLAY_PROTO]);
	else
		dprintk("RPC:       set up xprt to %s (autobind) via %s\n",
				xprt->address_strings[RPC_DISPLAY_ADDR],
				xprt->address_strings[RPC_DISPLAY_PROTO]);

	/* Pin this module for the lifetime of the transport */
	if (try_module_get(THIS_MODULE))
		return xprt;
	ret = ERR_PTR(-EINVAL);
out_err:
	xs_xprt_free(xprt);
	return ret;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2983) 
/**
 * xs_setup_bc_tcp - Set up transport to use a TCP backchannel socket
 * @args: rpc transport creation arguments
 *
 * Return: a new rpc_xprt on success, otherwise an ERR_PTR.
 */
static struct rpc_xprt *xs_setup_bc_tcp(struct xprt_create *args)
{
	struct sockaddr *addr = args->dstaddr;
	struct rpc_xprt *xprt;
	struct sock_xprt *transport;
	struct svc_sock *bc_sock;
	struct rpc_xprt *ret;

	xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries,
			xprt_tcp_slot_table_entries);
	if (IS_ERR(xprt))
		return xprt;
	transport = container_of(xprt, struct sock_xprt, xprt);

	xprt->prot = IPPROTO_TCP;
	xprt->max_payload = RPC_MAX_FRAGMENT_SIZE;
	xprt->timeout = &xs_tcp_default_timeout;

	/* backchannel: the connection already exists and is owned by the
	 * svc side, so no bind/reconnect/idle timeouts apply here */
	xprt_set_bound(xprt);
	xprt->bind_timeout = 0;
	xprt->reestablish_timeout = 0;
	xprt->idle_timeout = 0;

	xprt->ops = &bc_tcp_ops;

	switch (addr->sa_family) {
	case AF_INET:
		xs_format_peer_addresses(xprt, "tcp",
					 RPCBIND_NETID_TCP);
		break;
	case AF_INET6:
		xs_format_peer_addresses(xprt, "tcp",
				   RPCBIND_NETID_TCP6);
		break;
	default:
		ret = ERR_PTR(-EAFNOSUPPORT);
		goto out_err;
	}

	dprintk("RPC:       set up xprt to %s (port %s) via %s\n",
			xprt->address_strings[RPC_DISPLAY_ADDR],
			xprt->address_strings[RPC_DISPLAY_PORT],
			xprt->address_strings[RPC_DISPLAY_PROTO]);

	/*
	 * Once we've associated a backchannel xprt with a connection,
	 * we want to keep it around as long as the connection lasts,
	 * in case we need to start using it for a backchannel again;
	 * this reference won't be dropped until bc_xprt is destroyed.
	 */
	xprt_get(xprt);
	args->bc_xprt->xpt_bc_xprt = xprt;
	xprt->bc_xprt = args->bc_xprt;
	/* Borrow the svc socket instead of opening one of our own */
	bc_sock = container_of(args->bc_xprt, struct svc_sock, sk_xprt);
	transport->sock = bc_sock->sk_sock;
	transport->inet = bc_sock->sk_sk;

	/*
	 * Since we don't want connections for the backchannel, we set
	 * the xprt status to connected
	 */
	xprt_set_connected(xprt);

	if (try_module_get(THIS_MODULE))
		return xprt;

	/* Module pin failed: undo the linkage and the reference taken
	 * above before freeing the transport */
	args->bc_xprt->xpt_bc_xprt = NULL;
	args->bc_xprt->xpt_bc_xps = NULL;
	xprt_put(xprt);
	ret = ERR_PTR(-EINVAL);
out_err:
	xs_xprt_free(xprt);
	return ret;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3064) 
/* Registration record for the AF_LOCAL transport; the empty .netid
 * means it is not selectable by rpcbind netid string */
static struct xprt_class	xs_local_transport = {
	.list		= LIST_HEAD_INIT(xs_local_transport.list),
	.name		= "named UNIX socket",
	.owner		= THIS_MODULE,
	.ident		= XPRT_TRANSPORT_LOCAL,
	.setup		= xs_setup_local,
	.netid		= { "" },
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3073) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3074) static struct xprt_class	xs_udp_transport = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3075) 	.list		= LIST_HEAD_INIT(xs_udp_transport.list),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3076) 	.name		= "udp",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3077) 	.owner		= THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3078) 	.ident		= XPRT_TRANSPORT_UDP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3079) 	.setup		= xs_setup_udp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3080) 	.netid		= { "udp", "udp6", "" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3081) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3082) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3083) static struct xprt_class	xs_tcp_transport = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3084) 	.list		= LIST_HEAD_INIT(xs_tcp_transport.list),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3085) 	.name		= "tcp",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3086) 	.owner		= THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3087) 	.ident		= XPRT_TRANSPORT_TCP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3088) 	.setup		= xs_setup_tcp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3089) 	.netid		= { "tcp", "tcp6", "" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3090) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3091) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3092) static struct xprt_class	xs_bc_tcp_transport = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3093) 	.list		= LIST_HEAD_INIT(xs_bc_tcp_transport.list),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3094) 	.name		= "tcp NFSv4.1 backchannel",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3095) 	.owner		= THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3096) 	.ident		= XPRT_TRANSPORT_BC_TCP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3097) 	.setup		= xs_setup_bc_tcp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3098) 	.netid		= { "" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3099) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3100) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3101) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3102)  * init_socket_xprt - set up xprtsock's sysctls, register with RPC client
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3103)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3104)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3105) int init_socket_xprt(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3106) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3107) 	if (!sunrpc_table_header)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3108) 		sunrpc_table_header = register_sysctl_table(sunrpc_table);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3109) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3110) 	xprt_register_transport(&xs_local_transport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3111) 	xprt_register_transport(&xs_udp_transport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3112) 	xprt_register_transport(&xs_tcp_transport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3113) 	xprt_register_transport(&xs_bc_tcp_transport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3114) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3115) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3116) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3117) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3118) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3119)  * cleanup_socket_xprt - remove xprtsock's sysctls, unregister
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3120)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3121)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3122) void cleanup_socket_xprt(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3123) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3124) 	if (sunrpc_table_header) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3125) 		unregister_sysctl_table(sunrpc_table_header);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3126) 		sunrpc_table_header = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3127) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3128) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3129) 	xprt_unregister_transport(&xs_local_transport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3130) 	xprt_unregister_transport(&xs_udp_transport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3131) 	xprt_unregister_transport(&xs_tcp_transport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3132) 	xprt_unregister_transport(&xs_bc_tcp_transport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3133) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3134) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3135) static int param_set_uint_minmax(const char *val,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3136) 		const struct kernel_param *kp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3137) 		unsigned int min, unsigned int max)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3138) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3139) 	unsigned int num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3140) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3141) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3142) 	if (!val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3143) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3144) 	ret = kstrtouint(val, 0, &num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3145) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3146) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3147) 	if (num < min || num > max)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3148) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3149) 	*((unsigned int *)kp->arg) = num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3150) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3151) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3152) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3153) static int param_set_portnr(const char *val, const struct kernel_param *kp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3154) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3155) 	return param_set_uint_minmax(val, kp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3156) 			RPC_MIN_RESVPORT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3157) 			RPC_MAX_RESVPORT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3158) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3159) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3160) static const struct kernel_param_ops param_ops_portnr = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3161) 	.set = param_set_portnr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3162) 	.get = param_get_uint,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3163) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3164) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3165) #define param_check_portnr(name, p) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3166) 	__param_check(name, p, unsigned int);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3167) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3168) module_param_named(min_resvport, xprt_min_resvport, portnr, 0644);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3169) module_param_named(max_resvport, xprt_max_resvport, portnr, 0644);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3170) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3171) static int param_set_slot_table_size(const char *val,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3172) 				     const struct kernel_param *kp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3173) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3174) 	return param_set_uint_minmax(val, kp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3175) 			RPC_MIN_SLOT_TABLE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3176) 			RPC_MAX_SLOT_TABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3177) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3178) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3179) static const struct kernel_param_ops param_ops_slot_table_size = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3180) 	.set = param_set_slot_table_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3181) 	.get = param_get_uint,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3182) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3183) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3184) #define param_check_slot_table_size(name, p) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3185) 	__param_check(name, p, unsigned int);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3186) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3187) static int param_set_max_slot_table_size(const char *val,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3188) 				     const struct kernel_param *kp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3189) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3190) 	return param_set_uint_minmax(val, kp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3191) 			RPC_MIN_SLOT_TABLE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3192) 			RPC_MAX_SLOT_TABLE_LIMIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3193) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3194) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3195) static const struct kernel_param_ops param_ops_max_slot_table_size = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3196) 	.set = param_set_max_slot_table_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3197) 	.get = param_get_uint,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3198) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3199) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3200) #define param_check_max_slot_table_size(name, p) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3201) 	__param_check(name, p, unsigned int);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3202) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3203) module_param_named(tcp_slot_table_entries, xprt_tcp_slot_table_entries,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3204) 		   slot_table_size, 0644);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3205) module_param_named(tcp_max_slot_table_entries, xprt_max_tcp_slot_table_entries,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3206) 		   max_slot_table_size, 0644);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3207) module_param_named(udp_slot_table_entries, xprt_udp_slot_table_entries,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3208) 		   slot_table_size, 0644);