Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/net/sunrpc/clnt.c
 *
 *  This file contains the high-level RPC interface.
 *  It is modeled as a finite state machine to support both synchronous
 *  and asynchronous requests.
 *
 *  -	RPC header generation and argument serialization.
 *  -	Credential refresh.
 *  -	TCP connect handling.
 *  -	Retry of operation when it is suspected the operation failed because
 *	of uid squashing on the server, or when the credentials were stale
 *	and need to be refreshed, or when a packet was damaged in transit.
 *	This may have to be moved to the VFS layer.
 *
 *  Copyright (C) 1992,1993 Rick Sladkey <jrs@world.std.com>
 *  Copyright (C) 1995,1996 Olaf Kirch <okir@monad.swb.de>
 */


#include <linux/module.h>
#include <linux/types.h>
#include <linux/kallsyms.h>
#include <linux/mm.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>
#include <linux/utsname.h>
#include <linux/workqueue.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/un.h>

#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/addr.h>
#include <linux/sunrpc/rpc_pipe_fs.h>
#include <linux/sunrpc/metrics.h>
#include <linux/sunrpc/bc_xprt.h>
#include <trace/events/sunrpc.h>

#include "sunrpc.h"
#include "netns.h"

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_CALL
#endif

/*
 * All RPC clients are linked into this list
 */

static DECLARE_WAIT_QUEUE_HEAD(destroy_wait);


static void	call_start(struct rpc_task *task);
static void	call_reserve(struct rpc_task *task);
static void	call_reserveresult(struct rpc_task *task);
static void	call_allocate(struct rpc_task *task);
static void	call_encode(struct rpc_task *task);
static void	call_decode(struct rpc_task *task);
static void	call_bind(struct rpc_task *task);
static void	call_bind_status(struct rpc_task *task);
static void	call_transmit(struct rpc_task *task);
static void	call_status(struct rpc_task *task);
static void	call_transmit_status(struct rpc_task *task);
static void	call_refresh(struct rpc_task *task);
static void	call_refreshresult(struct rpc_task *task);
static void	call_connect(struct rpc_task *task);
static void	call_connect_status(struct rpc_task *task);

static int	rpc_encode_header(struct rpc_task *task,
				  struct xdr_stream *xdr);
static int	rpc_decode_header(struct rpc_task *task,
				  struct xdr_stream *xdr);
static int	rpc_ping(struct rpc_clnt *clnt);
static void	rpc_check_timeout(struct rpc_task *task);

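/*
 * Add a client to the list of all RPC clients in its network namespace,
 * under the sunrpc_net rpc_client_lock.
 */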
static void rpc_register_client(struct rpc_clnt *clnt)
{
	struct net *net = rpc_net_ns(clnt);
	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);

	spin_lock(&sn->rpc_client_lock);
	list_add(&clnt->cl_clients, &sn->all_clients);
	spin_unlock(&sn->rpc_client_lock);
}

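/*
 * Remove a client from its network namespace's list of RPC clients.
 */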
static void rpc_unregister_client(struct rpc_clnt *clnt)
{
	struct net *net = rpc_net_ns(clnt);
	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);

	spin_lock(&sn->rpc_client_lock);
	list_del(&clnt->cl_clients);
	spin_unlock(&sn->rpc_client_lock);
}

static void __rpc_clnt_remove_pipedir(struct rpc_clnt *clnt)
{
	rpc_remove_client_dir(clnt);
}

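/*
 * Remove the client's rpc_pipefs directory, but only if rpc_pipefs is
 * currently mounted in this network namespace (rpc_get_sb_net() returns
 * NULL otherwise); the get/put pair holds the superblock across the removal.
 */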
static void rpc_clnt_remove_pipedir(struct rpc_clnt *clnt)
{
	struct net *net = rpc_net_ns(clnt);
	struct super_block *pipefs_sb;

	pipefs_sb = rpc_get_sb_net(net);
	if (pipefs_sb) {
		__rpc_clnt_remove_pipedir(clnt);
		rpc_put_sb_net(net);
	}
}

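/*
 * Create a "clnt%x" directory for this client under its program's
 * pipe_dir_name in rpc_pipefs, retrying with a new id on -EEXIST.
 * Returns the new dentry, NULL if the parent directory does not exist,
 * or an ERR_PTR on other failures.
 */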
static struct dentry *rpc_setup_pipedir_sb(struct super_block *sb,
				    struct rpc_clnt *clnt)
{
	static uint32_t clntid;
	const char *dir_name = clnt->cl_program->pipe_dir_name;
	char name[15];
	struct dentry *dir, *dentry;

	dir = rpc_d_lookup_sb(sb, dir_name);
	if (dir == NULL) {
		pr_info("RPC: pipefs directory doesn't exist: %s\n", dir_name);
		return dir;
	}
	for (;;) {
		snprintf(name, sizeof(name), "clnt%x", (unsigned int)clntid++);
		name[sizeof(name) - 1] = '\0';
		dentry = rpc_create_client_dir(dir, name, clnt);
		if (!IS_ERR(dentry))
			break;
		if (dentry == ERR_PTR(-EEXIST))
			continue;
		printk(KERN_INFO "RPC: Couldn't create pipefs entry"
				" %s/%s, error %ld\n",
				dir_name, name, PTR_ERR(dentry));
		break;
	}
	dput(dir);
	return dentry;
}

static int
rpc_setup_pipedir(struct super_block *pipefs_sb, struct rpc_clnt *clnt)
{
	struct dentry *dentry;

	if (clnt->cl_program->pipe_dir_name != NULL) {
		dentry = rpc_setup_pipedir_sb(pipefs_sb, clnt);
		if (IS_ERR(dentry))
			return PTR_ERR(dentry);
	}
	return 0;
}

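/*
 * Return 1 if a pipefs mount/umount event is irrelevant to this client:
 * it has no pipe directory, the directory already exists (mount), the
 * client is being destroyed, or the directory is already gone (umount).
 */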
static int rpc_clnt_skip_event(struct rpc_clnt *clnt, unsigned long event)
{
	if (clnt->cl_program->pipe_dir_name == NULL)
		return 1;

	switch (event) {
	case RPC_PIPEFS_MOUNT:
		if (clnt->cl_pipedir_objects.pdh_dentry != NULL)
			return 1;
		if (atomic_read(&clnt->cl_count) == 0)
			return 1;
		break;
	case RPC_PIPEFS_UMOUNT:
		if (clnt->cl_pipedir_objects.pdh_dentry == NULL)
			return 1;
		break;
	}
	return 0;
}

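/*
 * Handle a single pipefs event for one client: create its pipefs
 * directory on mount, remove it on umount.  Returns 0 or a negative errno.
 */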
static int __rpc_clnt_handle_event(struct rpc_clnt *clnt, unsigned long event,
				   struct super_block *sb)
{
	struct dentry *dentry;

	switch (event) {
	case RPC_PIPEFS_MOUNT:
		dentry = rpc_setup_pipedir_sb(sb, clnt);
		if (!dentry)
			return -ENOENT;
		if (IS_ERR(dentry))
			return PTR_ERR(dentry);
		break;
	case RPC_PIPEFS_UMOUNT:
		__rpc_clnt_remove_pipedir(clnt);
		break;
	default:
		printk(KERN_ERR "%s: unknown event: %ld\n", __func__, event);
		return -ENOTSUPP;
	}
	return 0;
}

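/*
 * Deliver a pipefs event to this client and then walk up the cl_parent
 * chain, stopping at the first error or once the root client (which is
 * its own parent) has been processed.
 */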
static int __rpc_pipefs_event(struct rpc_clnt *clnt, unsigned long event,
				struct super_block *sb)
{
	int error = 0;

	for (;; clnt = clnt->cl_parent) {
		if (!rpc_clnt_skip_event(clnt, event))
			error = __rpc_clnt_handle_event(clnt, event, sb);
		if (error || clnt == clnt->cl_parent)
			break;
	}
	return error;
}

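/*
 * Find the first client in this network namespace that still needs to
 * handle the given pipefs event, or return NULL when none are left.
 */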
static struct rpc_clnt *rpc_get_client_for_event(struct net *net, int event)
{
	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
	struct rpc_clnt *clnt;

	spin_lock(&sn->rpc_client_lock);
	list_for_each_entry(clnt, &sn->all_clients, cl_clients) {
		if (rpc_clnt_skip_event(clnt, event))
			continue;
		spin_unlock(&sn->rpc_client_lock);
		return clnt;
	}
	spin_unlock(&sn->rpc_client_lock);
	return NULL;
}

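/*
 * rpc_pipefs mount/umount notifier callback: keep processing clients in
 * the affected network namespace until all of them have handled the
 * event or an error occurs.
 */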
static int rpc_pipefs_event(struct notifier_block *nb, unsigned long event,
			    void *ptr)
{
	struct super_block *sb = ptr;
	struct rpc_clnt *clnt;
	int error = 0;

	while ((clnt = rpc_get_client_for_event(sb->s_fs_info, event))) {
		error = __rpc_pipefs_event(clnt, event, sb);
		if (error)
			break;
	}
	return error;
}

static struct notifier_block rpc_clients_block = {
	.notifier_call	= rpc_pipefs_event,
	.priority	= SUNRPC_PIPEFS_RPC_PRIO,
};

int rpc_clients_notifier_register(void)
{
	return rpc_pipefs_notifier_register(&rpc_clients_block);
}

void rpc_clients_notifier_unregister(void)
{
	return rpc_pipefs_notifier_unregister(&rpc_clients_block);
}

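/*
 * Install a new transport and timeout parameters under cl_lock, turning
 * on autobind if the transport has no bound port yet.  Returns the
 * previous transport (if any) so the caller can release it.
 */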
static struct rpc_xprt *rpc_clnt_set_transport(struct rpc_clnt *clnt,
		struct rpc_xprt *xprt,
		const struct rpc_timeout *timeout)
{
	struct rpc_xprt *old;

	spin_lock(&clnt->cl_lock);
	old = rcu_dereference_protected(clnt->cl_xprt,
			lockdep_is_held(&clnt->cl_lock));

	if (!xprt_bound(xprt))
		clnt->cl_autobind = 1;

	clnt->cl_timeout = timeout;
	rcu_assign_pointer(clnt->cl_xprt, xprt);
	spin_unlock(&clnt->cl_lock);

	return old;
}

static void rpc_clnt_set_nodename(struct rpc_clnt *clnt, const char *nodename)
{
	clnt->cl_nodelen = strlcpy(clnt->cl_nodename,
			nodename, sizeof(clnt->cl_nodename));
}

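/*
 * Make the client visible: register it with debugfs, create its pipefs
 * directory when rpc_pipefs is mounted, add it to the per-namespace
 * client list and create its authentication handle.  All of this is
 * unwound if the auth handle cannot be created.
 */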
static int rpc_client_register(struct rpc_clnt *clnt,
			       rpc_authflavor_t pseudoflavor,
			       const char *client_name)
{
	struct rpc_auth_create_args auth_args = {
		.pseudoflavor = pseudoflavor,
		.target_name = client_name,
	};
	struct rpc_auth *auth;
	struct net *net = rpc_net_ns(clnt);
	struct super_block *pipefs_sb;
	int err;

	rpc_clnt_debugfs_register(clnt);

	pipefs_sb = rpc_get_sb_net(net);
	if (pipefs_sb) {
		err = rpc_setup_pipedir(pipefs_sb, clnt);
		if (err)
			goto out;
	}

	rpc_register_client(clnt);
	if (pipefs_sb)
		rpc_put_sb_net(net);

	auth = rpcauth_create(&auth_args, clnt);
	if (IS_ERR(auth)) {
		dprintk("RPC:       Couldn't create auth handle (flavor %u)\n",
				pseudoflavor);
		err = PTR_ERR(auth);
		goto err_auth;
	}
	return 0;
err_auth:
	pipefs_sb = rpc_get_sb_net(net);
	rpc_unregister_client(clnt);
	__rpc_clnt_remove_pipedir(clnt);
out:
	if (pipefs_sb)
		rpc_put_sb_net(net);
	rpc_clnt_debugfs_unregister(clnt);
	return err;
}

static DEFINE_IDA(rpc_clids);

void rpc_cleanup_clids(void)
{
	ida_destroy(&rpc_clids);
}

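/*
 * Allocate a unique client id (cl_clid) from the rpc_clids IDA.
 */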
static int rpc_alloc_clid(struct rpc_clnt *clnt)
{
	int clid;

	clid = ida_simple_get(&rpc_clids, 0, 0, GFP_KERNEL);
	if (clid < 0)
		return clid;
	clnt->cl_clid = clid;
	return 0;
}

static void rpc_free_clid(struct rpc_clnt *clnt)
{
	ida_simple_remove(&rpc_clids, clnt->cl_clid);
}

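/*
 * Common constructor: allocate and initialize a struct rpc_clnt for the
 * requested program and version, attach the transport and transport
 * switch, apply any caller-supplied timeouts, and register the client.
 * Returns the new client or an ERR_PTR on failure.
 */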
static struct rpc_clnt * rpc_new_client(const struct rpc_create_args *args,
		struct rpc_xprt_switch *xps,
		struct rpc_xprt *xprt,
		struct rpc_clnt *parent)
{
	const struct rpc_program *program = args->program;
	const struct rpc_version *version;
	struct rpc_clnt *clnt = NULL;
	const struct rpc_timeout *timeout;
	const char *nodename = args->nodename;
	int err;

	err = rpciod_up();
	if (err)
		goto out_no_rpciod;

	err = -EINVAL;
	if (args->version >= program->nrvers)
		goto out_err;
	version = program->version[args->version];
	if (version == NULL)
		goto out_err;

	err = -ENOMEM;
	clnt = kzalloc(sizeof(*clnt), GFP_KERNEL);
	if (!clnt)
		goto out_err;
	clnt->cl_parent = parent ? : clnt;

	err = rpc_alloc_clid(clnt);
	if (err)
		goto out_no_clid;

	clnt->cl_cred	  = get_cred(args->cred);
	clnt->cl_procinfo = version->procs;
	clnt->cl_maxproc  = version->nrprocs;
	clnt->cl_prog     = args->prognumber ? : program->number;
	clnt->cl_vers     = version->number;
	clnt->cl_stats    = program->stats;
	clnt->cl_metrics  = rpc_alloc_iostats(clnt);
	rpc_init_pipe_dir_head(&clnt->cl_pipedir_objects);
	err = -ENOMEM;
	if (clnt->cl_metrics == NULL)
		goto out_no_stats;
	clnt->cl_program  = program;
	INIT_LIST_HEAD(&clnt->cl_tasks);
	spin_lock_init(&clnt->cl_lock);

	timeout = xprt->timeout;
	if (args->timeout != NULL) {
		memcpy(&clnt->cl_timeout_default, args->timeout,
				sizeof(clnt->cl_timeout_default));
		timeout = &clnt->cl_timeout_default;
	}

	rpc_clnt_set_transport(clnt, xprt, timeout);
	xprt_iter_init(&clnt->cl_xpi, xps);
	xprt_switch_put(xps);

	clnt->cl_rtt = &clnt->cl_rtt_default;
	rpc_init_rtt(&clnt->cl_rtt_default, clnt->cl_timeout->to_initval);

	atomic_set(&clnt->cl_count, 1);

	if (nodename == NULL)
		nodename = utsname()->nodename;
	/* save the nodename */
	rpc_clnt_set_nodename(clnt, nodename);

	err = rpc_client_register(clnt, args->authflavor, args->client_name);
	if (err)
		goto out_no_path;
	if (parent)
		atomic_inc(&parent->cl_count);

	trace_rpc_clnt_new(clnt, xprt, program->name, args->servername);
	return clnt;

out_no_path:
	rpc_free_iostats(clnt->cl_metrics);
out_no_stats:
	put_cred(clnt->cl_cred);
	rpc_free_clid(clnt);
out_no_clid:
	kfree(clnt);
out_err:
	rpciod_down();
out_no_rpciod:
	xprt_switch_put(xps);
	xprt_put(xprt);
	trace_rpc_clnt_new_err(program->name, args->servername, err);
	return ERR_PTR(err);
}

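/*
 * Build an rpc_clnt on top of an existing transport: reuse or allocate a
 * transport switch, create the client, optionally ping the server
 * (unless RPC_CLNT_CREATE_NOPING) and apply the RPC_CLNT_CREATE_* flags.
 */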
static struct rpc_clnt *rpc_create_xprt(struct rpc_create_args *args,
					struct rpc_xprt *xprt)
{
	struct rpc_clnt *clnt = NULL;
	struct rpc_xprt_switch *xps;

	if (args->bc_xprt && args->bc_xprt->xpt_bc_xps) {
		WARN_ON_ONCE(!(args->protocol & XPRT_TRANSPORT_BC));
		xps = args->bc_xprt->xpt_bc_xps;
		xprt_switch_get(xps);
	} else {
		xps = xprt_switch_alloc(xprt, GFP_KERNEL);
		if (xps == NULL) {
			xprt_put(xprt);
			return ERR_PTR(-ENOMEM);
		}
		if (xprt->bc_xprt) {
			xprt_switch_get(xps);
			xprt->bc_xprt->xpt_bc_xps = xps;
		}
	}
	clnt = rpc_new_client(args, xps, xprt, NULL);
	if (IS_ERR(clnt))
		return clnt;

	if (!(args->flags & RPC_CLNT_CREATE_NOPING)) {
		int err = rpc_ping(clnt);
		if (err != 0) {
			rpc_shutdown_client(clnt);
			return ERR_PTR(err);
		}
	}

	clnt->cl_softrtry = 1;
	if (args->flags & (RPC_CLNT_CREATE_HARDRTRY|RPC_CLNT_CREATE_SOFTERR)) {
		clnt->cl_softrtry = 0;
		if (args->flags & RPC_CLNT_CREATE_SOFTERR)
			clnt->cl_softerr = 1;
	}

	if (args->flags & RPC_CLNT_CREATE_AUTOBIND)
		clnt->cl_autobind = 1;
	if (args->flags & RPC_CLNT_CREATE_NO_RETRANS_TIMEOUT)
		clnt->cl_noretranstimeo = 1;
	if (args->flags & RPC_CLNT_CREATE_DISCRTRY)
		clnt->cl_discrtry = 1;
	if (!(args->flags & RPC_CLNT_CREATE_QUIET))
		clnt->cl_chatty = 1;

	return clnt;
}

/**
 * rpc_create - create an RPC client and transport with one call
 * @args: rpc_clnt create argument structure
 *
 * Creates and initializes an RPC transport and an RPC client.
 *
 * It can ping the server in order to determine if it is up, and to see if
 * it supports this program and version.  RPC_CLNT_CREATE_NOPING disables
 * this behavior so asynchronous tasks can also use rpc_create.
 */
struct rpc_clnt *rpc_create(struct rpc_create_args *args)
{
	struct rpc_xprt *xprt;
	struct xprt_create xprtargs = {
		.net = args->net,
		.ident = args->protocol,
		.srcaddr = args->saddress,
		.dstaddr = args->address,
		.addrlen = args->addrsize,
		.servername = args->servername,
		.bc_xprt = args->bc_xprt,
	};
	char servername[48];
	struct rpc_clnt *clnt;
	int i;

	if (args->bc_xprt) {
		WARN_ON_ONCE(!(args->protocol & XPRT_TRANSPORT_BC));
		xprt = args->bc_xprt->xpt_bc_xprt;
		if (xprt) {
			xprt_get(xprt);
			return rpc_create_xprt(args, xprt);
		}
	}

	if (args->flags & RPC_CLNT_CREATE_INFINITE_SLOTS)
		xprtargs.flags |= XPRT_CREATE_INFINITE_SLOTS;
	if (args->flags & RPC_CLNT_CREATE_NO_IDLE_TIMEOUT)
		xprtargs.flags |= XPRT_CREATE_NO_IDLE_TIMEOUT;
	/*
	 * If the caller chooses not to specify a hostname, whip
	 * up a string representation of the passed-in address.
	 */
	if (xprtargs.servername == NULL) {
		struct sockaddr_un *sun =
				(struct sockaddr_un *)args->address;
		struct sockaddr_in *sin =
				(struct sockaddr_in *)args->address;
		struct sockaddr_in6 *sin6 =
				(struct sockaddr_in6 *)args->address;

		servername[0] = '\0';
		switch (args->address->sa_family) {
		case AF_LOCAL:
			snprintf(servername, sizeof(servername), "%s",
				 sun->sun_path);
			break;
		case AF_INET:
			snprintf(servername, sizeof(servername), "%pI4",
				 &sin->sin_addr.s_addr);
			break;
		case AF_INET6:
			snprintf(servername, sizeof(servername), "%pI6",
				 &sin6->sin6_addr);
			break;
		default:
			/* caller wants default server name, but
			 * address family isn't recognized. */
			return ERR_PTR(-EINVAL);
		}
		xprtargs.servername = servername;
	}

	xprt = xprt_create_transport(&xprtargs);
	if (IS_ERR(xprt))
		return (struct rpc_clnt *)xprt;

	/*
	 * By default, kernel RPC client connects from a reserved port.
	 * CAP_NET_BIND_SERVICE will not be set for unprivileged requesters,
	 * but it is always enabled for rpciod, which handles the connect
	 * operation.
	 */
	xprt->resvport = 1;
	if (args->flags & RPC_CLNT_CREATE_NONPRIVPORT)
		xprt->resvport = 0;
	xprt->reuseport = 0;
	if (args->flags & RPC_CLNT_CREATE_REUSEPORT)
		xprt->reuseport = 1;

	clnt = rpc_create_xprt(args, xprt);
	if (IS_ERR(clnt) || args->nconnect <= 1)
		return clnt;

	for (i = 0; i < args->nconnect - 1; i++) {
		if (rpc_clnt_add_xprt(clnt, &xprtargs, NULL, NULL) < 0)
			break;
	}
	return clnt;
}
EXPORT_SYMBOL_GPL(rpc_create);

/*
 * This function clones the RPC client structure. It allows us to share the
 * same transport while varying parameters such as the authentication
 * flavour.
 */
static struct rpc_clnt *__rpc_clone_client(struct rpc_create_args *args,
					   struct rpc_clnt *clnt)
{
	struct rpc_xprt_switch *xps;
	struct rpc_xprt *xprt;
	struct rpc_clnt *new;
	int err;

	err = -ENOMEM;
	rcu_read_lock();
	xprt = xprt_get(rcu_dereference(clnt->cl_xprt));
	xps = xprt_switch_get(rcu_dereference(clnt->cl_xpi.xpi_xpswitch));
	rcu_read_unlock();
	if (xprt == NULL || xps == NULL) {
		xprt_put(xprt);
		xprt_switch_put(xps);
		goto out_err;
	}
	args->servername = xprt->servername;
	args->nodename = clnt->cl_nodename;

	new = rpc_new_client(args, xps, xprt, clnt);
	if (IS_ERR(new))
		return new;

	/* Turn off autobind on clones */
	new->cl_autobind = 0;
	new->cl_softrtry = clnt->cl_softrtry;
	new->cl_softerr = clnt->cl_softerr;
	new->cl_noretranstimeo = clnt->cl_noretranstimeo;
	new->cl_discrtry = clnt->cl_discrtry;
	new->cl_chatty = clnt->cl_chatty;
	new->cl_principal = clnt->cl_principal;
	return new;

out_err:
	trace_rpc_clnt_clone_err(clnt, err);
	return ERR_PTR(err);
}

/**
 * rpc_clone_client - Clone an RPC client structure
 *
 * @clnt: RPC client whose parameters are copied
 *
 * Returns a fresh RPC client or an ERR_PTR.
 */
struct rpc_clnt *rpc_clone_client(struct rpc_clnt *clnt)
{
	struct rpc_create_args args = {
		.program	= clnt->cl_program,
		.prognumber	= clnt->cl_prog,
		.version	= clnt->cl_vers,
		.authflavor	= clnt->cl_auth->au_flavor,
		.cred		= clnt->cl_cred,
	};
	return __rpc_clone_client(&args, clnt);
}
EXPORT_SYMBOL_GPL(rpc_clone_client);

/**
 * rpc_clone_client_set_auth - Clone an RPC client structure and set its auth
 *
 * @clnt: RPC client whose parameters are copied
 * @flavor: security flavor for new client
 *
 * Returns a fresh RPC client or an ERR_PTR.
 */
struct rpc_clnt *
rpc_clone_client_set_auth(struct rpc_clnt *clnt, rpc_authflavor_t flavor)
{
	struct rpc_create_args args = {
		.program	= clnt->cl_program,
		.prognumber	= clnt->cl_prog,
		.version	= clnt->cl_vers,
		.authflavor	= flavor,
		.cred		= clnt->cl_cred,
	};
	return __rpc_clone_client(&args, clnt);
}
EXPORT_SYMBOL_GPL(rpc_clone_client_set_auth);

/**
 * rpc_switch_client_transport: switch the RPC transport on the fly
 * @clnt: pointer to a struct rpc_clnt
 * @args: pointer to the new transport arguments
 * @timeout: pointer to the new timeout parameters
 *
 * This function allows the caller to switch the RPC transport for the
 * rpc_clnt structure 'clnt' to allow it to connect to a mirrored NFS
 * server, for instance.  It assumes that the caller has ensured that
 * there are no active RPC tasks by using some form of locking.
 *
 * Returns zero if "clnt" is now using the new xprt.  Otherwise a
 * negative errno is returned, and "clnt" continues to use the old
 * xprt.
 */
int rpc_switch_client_transport(struct rpc_clnt *clnt,
		struct xprt_create *args,
		const struct rpc_timeout *timeout)
{
	const struct rpc_timeout *old_timeo;
	rpc_authflavor_t pseudoflavor;
	struct rpc_xprt_switch *xps, *oldxps;
	struct rpc_xprt *xprt, *old;
	struct rpc_clnt *parent;
	int err;

	xprt = xprt_create_transport(args);
	if (IS_ERR(xprt))
		return PTR_ERR(xprt);

	xps = xprt_switch_alloc(xprt, GFP_KERNEL);
	if (xps == NULL) {
		xprt_put(xprt);
		return -ENOMEM;
	}

	pseudoflavor = clnt->cl_auth->au_flavor;

	old_timeo = clnt->cl_timeout;
	old = rpc_clnt_set_transport(clnt, xprt, timeout);
	oldxps = xprt_iter_xchg_switch(&clnt->cl_xpi, xps);

	rpc_unregister_client(clnt);
	__rpc_clnt_remove_pipedir(clnt);
	rpc_clnt_debugfs_unregister(clnt);

	/*
	 * A new transport was created.  "clnt" therefore
	 * becomes the root of a new cl_parent tree.  clnt's
	 * children, if it has any, still point to the old xprt.
	 */
	parent = clnt->cl_parent;
	clnt->cl_parent = clnt;

	/*
	 * The old rpc_auth cache cannot be re-used.  GSS
	 * contexts in particular are between a single
	 * client and server.
	 */
	err = rpc_client_register(clnt, pseudoflavor, NULL);
	if (err)
		goto out_revert;

	synchronize_rcu();
	if (parent != clnt)
		rpc_release_client(parent);
	xprt_switch_put(oldxps);
	xprt_put(old);
	trace_rpc_clnt_replace_xprt(clnt);
	return 0;

out_revert:
	xps = xprt_iter_xchg_switch(&clnt->cl_xpi, oldxps);
	rpc_clnt_set_transport(clnt, old, old_timeo);
	clnt->cl_parent = parent;
	rpc_client_register(clnt, pseudoflavor, NULL);
	xprt_switch_put(xps);
	xprt_put(xprt);
	trace_rpc_clnt_replace_xprt_err(clnt);
	return err;
}
EXPORT_SYMBOL_GPL(rpc_switch_client_transport);

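/*
 * Set up an iterator over all transports currently attached to the
 * client; returns -EAGAIN if the client has no transport switch.
 */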
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) static
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) int rpc_clnt_xprt_iter_init(struct rpc_clnt *clnt, struct rpc_xprt_iter *xpi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) 	struct rpc_xprt_switch *xps;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) 	rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) 	xps = xprt_switch_get(rcu_dereference(clnt->cl_xpi.xpi_xpswitch));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) 	rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) 	if (xps == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) 		return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) 	xprt_iter_init_listall(xpi, xps);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) 	xprt_switch_put(xps);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791)  * rpc_clnt_iterate_for_each_xprt - Apply a function to all transports
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792)  * @clnt: pointer to client
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793)  * @fn: function to apply
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794)  * @data: void pointer to function data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796)  * Iterates through the list of RPC transports currently attached to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797)  * client and applies the function fn(clnt, xprt, data).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799)  * On error, the iteration stops, and the function returns the error value.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) int rpc_clnt_iterate_for_each_xprt(struct rpc_clnt *clnt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 		int (*fn)(struct rpc_clnt *, struct rpc_xprt *, void *),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 		void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 	struct rpc_xprt_iter xpi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 	ret = rpc_clnt_xprt_iter_init(clnt, &xpi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 	for (;;) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 		struct rpc_xprt *xprt = xprt_iter_get_next(&xpi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 		if (!xprt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 		ret = fn(clnt, xprt, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 		xprt_put(xprt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 		if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 	xprt_iter_destroy(&xpi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) EXPORT_SYMBOL_GPL(rpc_clnt_iterate_for_each_xprt);
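
/*
 * Editor's illustrative sketch, not part of the original clnt.c: a minimal
 * caller of rpc_clnt_iterate_for_each_xprt() that just counts the attached
 * transports.  The names example_count_xprt/example_count_transports are
 * hypothetical; returning a negative value from the callback would stop
 * the iteration early.
 */
static int example_count_xprt(struct rpc_clnt *clnt, struct rpc_xprt *xprt,
			      void *data)
{
	unsigned int *count = data;

	(*count)++;
	return 0;
}

static int __maybe_unused
example_count_transports(struct rpc_clnt *clnt, unsigned int *count)
{
	*count = 0;
	return rpc_clnt_iterate_for_each_xprt(clnt, example_count_xprt, count);
}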
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827)  * Kill all tasks for the given client.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828)  * XXX: kill their descendants as well?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) void rpc_killall_tasks(struct rpc_clnt *clnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 	struct rpc_task	*rovr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 	if (list_empty(&clnt->cl_tasks))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 	 * Hold cl_lock so the cl_tasks list cannot change beneath us.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 	trace_rpc_clnt_killall(clnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 	spin_lock(&clnt->cl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 	list_for_each_entry(rovr, &clnt->cl_tasks, tk_task)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 		rpc_signal_task(rovr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 	spin_unlock(&clnt->cl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) EXPORT_SYMBOL_GPL(rpc_killall_tasks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850)  * Properly shut down an RPC client, terminating all outstanding
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851)  * requests.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) void rpc_shutdown_client(struct rpc_clnt *clnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 	might_sleep();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 	trace_rpc_clnt_shutdown(clnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 	while (!list_empty(&clnt->cl_tasks)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 		rpc_killall_tasks(clnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 		wait_event_timeout(destroy_wait,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 			list_empty(&clnt->cl_tasks), 1*HZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 	rpc_release_client(clnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) EXPORT_SYMBOL_GPL(rpc_shutdown_client);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870)  * Free an RPC client
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) static void rpc_free_client_work(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 	struct rpc_clnt *clnt = container_of(work, struct rpc_clnt, cl_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 	trace_rpc_clnt_free(clnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 	/* These calls might block on processes that are trying to allocate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 	 * memory, so they cannot be made from rpciod; they are handled from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 	 * this workqueue context instead.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 	rpc_clnt_debugfs_unregister(clnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 	rpc_free_clid(clnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 	rpc_clnt_remove_pipedir(clnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 	xprt_put(rcu_dereference_raw(clnt->cl_xprt));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 	kfree(clnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 	rpciod_down();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) static struct rpc_clnt *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) rpc_free_client(struct rpc_clnt *clnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 	struct rpc_clnt *parent = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 	trace_rpc_clnt_release(clnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 	if (clnt->cl_parent != clnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 		parent = clnt->cl_parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 	rpc_unregister_client(clnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 	rpc_free_iostats(clnt->cl_metrics);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 	clnt->cl_metrics = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 	xprt_iter_destroy(&clnt->cl_xpi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 	put_cred(clnt->cl_cred);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 	INIT_WORK(&clnt->cl_work, rpc_free_client_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 	schedule_work(&clnt->cl_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 	return parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910)  * Release the client's rpc_auth, then free the client
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) static struct rpc_clnt *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) rpc_free_auth(struct rpc_clnt *clnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 	if (clnt->cl_auth == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 		return rpc_free_client(clnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 	 * Note: RPCSEC_GSS may need to send NULL RPC calls in order to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 	 *       release remaining GSS contexts. This mechanism ensures
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 	 *       that it can do so safely.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 	atomic_inc(&clnt->cl_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 	rpcauth_release(clnt->cl_auth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 	clnt->cl_auth = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 	if (atomic_dec_and_test(&clnt->cl_count))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 		return rpc_free_client(clnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932)  * Release reference to the RPC client
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) rpc_release_client(struct rpc_clnt *clnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 		if (list_empty(&clnt->cl_tasks))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 			wake_up(&destroy_wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 		if (!atomic_dec_and_test(&clnt->cl_count))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 		clnt = rpc_free_auth(clnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 	} while (clnt != NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) EXPORT_SYMBOL_GPL(rpc_release_client);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948)  * rpc_bind_new_program - bind a new RPC program to an existing client
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949)  * @old: old rpc_client
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950)  * @program: rpc program to set
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951)  * @vers: rpc program version
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953)  * Clones the rpc client and sets up a new RPC program. This is mainly
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954)  * of use for enabling different RPC programs to share the same transport.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955)  * The Sun NFSv2/v3 ACL protocol can do this.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *old,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 				      const struct rpc_program *program,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 				      u32 vers)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 	struct rpc_create_args args = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 		.program	= program,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 		.prognumber	= program->number,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 		.version	= vers,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 		.authflavor	= old->cl_auth->au_flavor,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 		.cred		= old->cl_cred,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 	struct rpc_clnt *clnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 	clnt = __rpc_clone_client(&args, old);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 	if (IS_ERR(clnt))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 	err = rpc_ping(clnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 	if (err != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 		rpc_shutdown_client(clnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 		clnt = ERR_PTR(err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 	return clnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) EXPORT_SYMBOL_GPL(rpc_bind_new_program);
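
/*
 * Editor's illustrative sketch, not part of the original clnt.c: an upper
 * layer that wants to speak a side-band program (such as an ACL protocol)
 * over an existing client's transport clones the client and binds the new
 * program to it.  "acl_program", "nfs_client" and the version number 3 are
 * hypothetical.
 */
static struct rpc_clnt *__maybe_unused
example_bind_acl(struct rpc_clnt *nfs_client,
		 const struct rpc_program *acl_program)
{
	/* Returns a new rpc_clnt on success or an ERR_PTR() on failure. */
	return rpc_bind_new_program(nfs_client, acl_program, 3);
}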
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) struct rpc_xprt *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) rpc_task_get_xprt(struct rpc_clnt *clnt, struct rpc_xprt *xprt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 	struct rpc_xprt_switch *xps;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 	if (!xprt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 	rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 	xps = rcu_dereference(clnt->cl_xpi.xpi_xpswitch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 	atomic_long_inc(&xps->xps_queuelen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 	rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 	atomic_long_inc(&xprt->queuelen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 	return xprt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) rpc_task_release_xprt(struct rpc_clnt *clnt, struct rpc_xprt *xprt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 	struct rpc_xprt_switch *xps;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 	atomic_long_dec(&xprt->queuelen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 	rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 	xps = rcu_dereference(clnt->cl_xpi.xpi_xpswitch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 	atomic_long_dec(&xps->xps_queuelen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 	rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 	xprt_put(xprt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) void rpc_task_release_transport(struct rpc_task *task)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 	struct rpc_xprt *xprt = task->tk_xprt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 	if (xprt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 		task->tk_xprt = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 		if (task->tk_client)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 			rpc_task_release_xprt(task->tk_client, xprt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 			xprt_put(xprt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) EXPORT_SYMBOL_GPL(rpc_task_release_transport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) void rpc_task_release_client(struct rpc_task *task)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 	struct rpc_clnt *clnt = task->tk_client;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 	rpc_task_release_transport(task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 	if (clnt != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 		/* Remove from client task list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 		spin_lock(&clnt->cl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 		list_del(&task->tk_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 		spin_unlock(&clnt->cl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 		task->tk_client = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 		rpc_release_client(clnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) static struct rpc_xprt *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) rpc_task_get_first_xprt(struct rpc_clnt *clnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 	struct rpc_xprt *xprt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 	rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 	xprt = xprt_get(rcu_dereference(clnt->cl_xprt));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 	rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 	return rpc_task_get_xprt(clnt, xprt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) static struct rpc_xprt *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) rpc_task_get_next_xprt(struct rpc_clnt *clnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 	return rpc_task_get_xprt(clnt, xprt_iter_get_next(&clnt->cl_xpi));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) static
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) void rpc_task_set_transport(struct rpc_task *task, struct rpc_clnt *clnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 	if (task->tk_xprt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 	if (task->tk_flags & RPC_TASK_NO_ROUND_ROBIN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 		task->tk_xprt = rpc_task_get_first_xprt(clnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 		task->tk_xprt = rpc_task_get_next_xprt(clnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) static
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) void rpc_task_set_client(struct rpc_task *task, struct rpc_clnt *clnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 	if (clnt != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 		rpc_task_set_transport(task, clnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 		task->tk_client = clnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 		atomic_inc(&clnt->cl_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 		if (clnt->cl_softrtry)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 			task->tk_flags |= RPC_TASK_SOFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 		if (clnt->cl_softerr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 			task->tk_flags |= RPC_TASK_TIMEOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 		if (clnt->cl_noretranstimeo)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 			task->tk_flags |= RPC_TASK_NO_RETRANS_TIMEOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 		if (atomic_read(&clnt->cl_swapper))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 			task->tk_flags |= RPC_TASK_SWAPPER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 		/* Add to the client's list of all tasks */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 		spin_lock(&clnt->cl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 		list_add_tail(&task->tk_task, &clnt->cl_tasks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 		spin_unlock(&clnt->cl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) rpc_task_set_rpc_message(struct rpc_task *task, const struct rpc_message *msg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 	if (msg != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 		task->tk_msg.rpc_proc = msg->rpc_proc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 		task->tk_msg.rpc_argp = msg->rpc_argp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 		task->tk_msg.rpc_resp = msg->rpc_resp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 		task->tk_msg.rpc_cred = msg->rpc_cred;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 		if (!(task->tk_flags & RPC_TASK_CRED_NOREF))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 			get_cred(task->tk_msg.rpc_cred);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109)  * Default callback for async RPC calls
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) rpc_default_callback(struct rpc_task *task, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) static const struct rpc_call_ops rpc_default_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 	.rpc_call_done = rpc_default_callback,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121)  * rpc_run_task - Allocate a new RPC task, then run rpc_execute against it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122)  * @task_setup_data: pointer to task initialisation data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) struct rpc_task *rpc_run_task(const struct rpc_task_setup *task_setup_data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) 	struct rpc_task *task;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) 	task = rpc_new_task(task_setup_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) 	if (!RPC_IS_ASYNC(task))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) 		task->tk_flags |= RPC_TASK_CRED_NOREF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) 	rpc_task_set_client(task, task_setup_data->rpc_client);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 	rpc_task_set_rpc_message(task, task_setup_data->rpc_message);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) 	if (task->tk_action == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) 		rpc_call_start(task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) 	atomic_inc(&task->tk_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) 	rpc_execute(task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 	return task;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) EXPORT_SYMBOL_GPL(rpc_run_task);
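
/*
 * Editor's illustrative sketch, not part of the original clnt.c:
 * rpc_run_task() returns the task with an extra reference held for the
 * caller, which must eventually be dropped with rpc_put_task().  The
 * rpc_call_ops table and calldata here are assumed to be supplied by the
 * caller.
 */
static struct rpc_task *__maybe_unused
example_start_async(struct rpc_clnt *clnt, const struct rpc_message *msg,
		    const struct rpc_call_ops *ops, void *calldata)
{
	struct rpc_task_setup setup = {
		.rpc_client	= clnt,
		.rpc_message	= msg,
		.callback_ops	= ops,
		.callback_data	= calldata,
		.flags		= RPC_TASK_ASYNC,
	};

	/* On success the task has already been queued for execution. */
	return rpc_run_task(&setup);
}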
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146)  * rpc_call_sync - Perform a synchronous RPC call
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147)  * @clnt: pointer to RPC client
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148)  * @msg: RPC call parameters
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149)  * @flags: RPC call flags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) int rpc_call_sync(struct rpc_clnt *clnt, const struct rpc_message *msg, int flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 	struct rpc_task	*task;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) 	struct rpc_task_setup task_setup_data = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 		.rpc_client = clnt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) 		.rpc_message = msg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) 		.callback_ops = &rpc_default_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 		.flags = flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 	int status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) 	WARN_ON_ONCE(flags & RPC_TASK_ASYNC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) 	if (flags & RPC_TASK_ASYNC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) 		rpc_release_calldata(task_setup_data.callback_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) 			task_setup_data.callback_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 	task = rpc_run_task(&task_setup_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 	if (IS_ERR(task))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 		return PTR_ERR(task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 	status = task->tk_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) 	rpc_put_task(task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) 	return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) EXPORT_SYMBOL_GPL(rpc_call_sync);
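
/*
 * Editor's illustrative sketch, not part of the original clnt.c: a typical
 * synchronous caller fills in an rpc_message and blocks in rpc_call_sync()
 * until the reply or an error comes back.  The procedure entry and the
 * argument/result pointers are hypothetical.
 */
static int __maybe_unused
example_call_sync(struct rpc_clnt *clnt, const struct rpc_procinfo *proc,
		  void *argp, void *resp)
{
	struct rpc_message msg = {
		.rpc_proc	= proc,
		.rpc_argp	= argp,
		.rpc_resp	= resp,
	};

	/* Returns the finished task's tk_status: 0 or a negative errno. */
	return rpc_call_sync(clnt, &msg, RPC_TASK_SOFT);
}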
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179)  * rpc_call_async - Perform an asynchronous RPC call
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180)  * @clnt: pointer to RPC client
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181)  * @msg: RPC call parameters
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182)  * @flags: RPC call flags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183)  * @tk_ops: RPC call ops
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184)  * @data: user call data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) rpc_call_async(struct rpc_clnt *clnt, const struct rpc_message *msg, int flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) 	       const struct rpc_call_ops *tk_ops, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) 	struct rpc_task	*task;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) 	struct rpc_task_setup task_setup_data = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) 		.rpc_client = clnt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) 		.rpc_message = msg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 		.callback_ops = tk_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) 		.callback_data = data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) 		.flags = flags|RPC_TASK_ASYNC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) 	task = rpc_run_task(&task_setup_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) 	if (IS_ERR(task))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) 		return PTR_ERR(task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) 	rpc_put_task(task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) EXPORT_SYMBOL_GPL(rpc_call_async);
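
/*
 * Editor's illustrative sketch, not part of the original clnt.c: an
 * asynchronous caller supplies an rpc_call_ops table; ->rpc_call_done()
 * runs on completion and can inspect task->tk_status.  All names here are
 * hypothetical.
 */
static void example_async_done(struct rpc_task *task, void *calldata)
{
	if (task->tk_status < 0)
		pr_debug("example async call failed: %d\n", task->tk_status);
}

static const struct rpc_call_ops example_async_ops = {
	.rpc_call_done	= example_async_done,
};

static int __maybe_unused
example_call_async(struct rpc_clnt *clnt, const struct rpc_message *msg,
		   void *calldata)
{
	return rpc_call_async(clnt, msg, RPC_TASK_SOFT, &example_async_ops,
			      calldata);
}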
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) #if defined(CONFIG_SUNRPC_BACKCHANNEL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) static void call_bc_encode(struct rpc_task *task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211)  * rpc_run_bc_task - Allocate a new RPC task for backchannel use, then run
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212)  * rpc_execute against it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213)  * @req: RPC request
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) struct rpc_task *rpc_run_bc_task(struct rpc_rqst *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) 	struct rpc_task *task;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) 	struct rpc_task_setup task_setup_data = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) 		.callback_ops = &rpc_default_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 		.flags = RPC_TASK_SOFTCONN |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) 			RPC_TASK_NO_RETRANS_TIMEOUT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) 	dprintk("RPC: rpc_run_bc_task req= %p\n", req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) 	 * Create an rpc_task to send the data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) 	task = rpc_new_task(&task_setup_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) 	xprt_init_bc_request(req, task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) 	task->tk_action = call_bc_encode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) 	atomic_inc(&task->tk_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) 	WARN_ON_ONCE(atomic_read(&task->tk_count) != 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) 	rpc_execute(task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) 	dprintk("RPC: rpc_run_bc_task: task= %p\n", task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) 	return task;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) #endif /* CONFIG_SUNRPC_BACKCHANNEL */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242)  * rpc_prepare_reply_pages - Prepare to receive a reply data payload into pages
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243)  * @req: RPC request to prepare
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244)  * @pages: vector of struct page pointers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245)  * @base: offset in first page where receive should start, in bytes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246)  * @len: expected size of the upper layer data payload, in bytes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247)  * @hdrsize: expected size of upper layer reply header, in XDR words
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) void rpc_prepare_reply_pages(struct rpc_rqst *req, struct page **pages,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) 			     unsigned int base, unsigned int len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) 			     unsigned int hdrsize)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) 	/* Subtract one to force an extra word of buffer space for the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) 	 * payload's XDR pad to fall into the rcv_buf's tail iovec.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) 	hdrsize += RPC_REPHDRSIZE + req->rq_cred->cr_auth->au_ralign - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) 	xdr_inline_pages(&req->rq_rcv_buf, hdrsize << 2, pages, base, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) 	trace_rpc_xdr_reply_pages(req->rq_task, &req->rq_rcv_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) EXPORT_SYMBOL_GPL(rpc_prepare_reply_pages);
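
/*
 * Editor's illustrative sketch, not part of the original clnt.c: a
 * READ-style XDR encoder tells the transport where the bulk reply payload
 * should land.  The value 2 stands for a hypothetical two XDR words of
 * procedure-specific reply header that precede the payload.
 */
static void __maybe_unused
example_encode_read_reply(struct rpc_rqst *req, struct page **pages,
			  unsigned int pgbase, unsigned int count)
{
	rpc_prepare_reply_pages(req, pages, pgbase, count, 2);
}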
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) rpc_call_start(struct rpc_task *task)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) 	task->tk_action = call_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) EXPORT_SYMBOL_GPL(rpc_call_start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272)  * rpc_peeraddr - extract remote peer address from clnt's xprt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273)  * @clnt: RPC client structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274)  * @buf: target buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275)  * @bufsize: length of target buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277)  * Returns the number of bytes that are actually in the stored address.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) size_t rpc_peeraddr(struct rpc_clnt *clnt, struct sockaddr *buf, size_t bufsize)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) 	size_t bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) 	struct rpc_xprt *xprt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) 	rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) 	xprt = rcu_dereference(clnt->cl_xprt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) 	bytes = xprt->addrlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) 	if (bytes > bufsize)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) 		bytes = bufsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) 	memcpy(buf, &xprt->addr, bytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) 	rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) 	return bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) EXPORT_SYMBOL_GPL(rpc_peeraddr);
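
/*
 * Editor's illustrative sketch, not part of the original clnt.c: callers
 * usually pass a sockaddr_storage so that any address family fits; the
 * return value is the number of bytes actually copied.
 */
static size_t __maybe_unused
example_copy_peeraddr(struct rpc_clnt *clnt, struct sockaddr_storage *ss)
{
	return rpc_peeraddr(clnt, (struct sockaddr *)ss, sizeof(*ss));
}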
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298)  * rpc_peeraddr2str - return remote peer address in printable format
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299)  * @clnt: RPC client structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300)  * @format: address format
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302)  * NB: the lifetime of the memory referenced by the returned pointer is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303)  * the same as the rpc_xprt itself.  As long as the caller uses this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304)  * pointer, it must hold the RCU read lock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) const char *rpc_peeraddr2str(struct rpc_clnt *clnt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) 			     enum rpc_display_format_t format)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) 	struct rpc_xprt *xprt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) 	xprt = rcu_dereference(clnt->cl_xprt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) 	if (xprt->address_strings[format] != NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) 		return xprt->address_strings[format];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) 		return "unprintable";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) EXPORT_SYMBOL_GPL(rpc_peeraddr2str);
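
/*
 * Editor's illustrative sketch, not part of the original clnt.c: because
 * the returned string lives inside the rpc_xprt, the caller must stay in
 * an RCU read-side critical section while using it.  RPC_DISPLAY_ADDR is
 * one of the standard rpc_display_format_t values.
 */
static void __maybe_unused example_log_peer(struct rpc_clnt *clnt)
{
	rcu_read_lock();
	pr_debug("peer: %s\n", rpc_peeraddr2str(clnt, RPC_DISPLAY_ADDR));
	rcu_read_unlock();
}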
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) static const struct sockaddr_in rpc_inaddr_loopback = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) 	.sin_family		= AF_INET,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) 	.sin_addr.s_addr	= htonl(INADDR_ANY),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) static const struct sockaddr_in6 rpc_in6addr_loopback = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) 	.sin6_family		= AF_INET6,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) 	.sin6_addr		= IN6ADDR_ANY_INIT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331)  * Try a getsockname() on a connected datagram socket.  Using a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332)  * connected datagram socket prevents leaving a socket in TIME_WAIT.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333)  * This conserves the ephemeral port number space.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335)  * Returns zero and fills in "buf" if successful; otherwise, a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336)  * negative errno is returned.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) static int rpc_sockname(struct net *net, struct sockaddr *sap, size_t salen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) 			struct sockaddr *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) 	struct socket *sock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) 	err = __sock_create(net, sap->sa_family,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) 				SOCK_DGRAM, IPPROTO_UDP, &sock, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) 	if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) 		dprintk("RPC:       can't create UDP socket (%d)\n", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) 	switch (sap->sa_family) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) 	case AF_INET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) 		err = kernel_bind(sock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) 				(struct sockaddr *)&rpc_inaddr_loopback,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) 				sizeof(rpc_inaddr_loopback));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) 	case AF_INET6:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) 		err = kernel_bind(sock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) 				(struct sockaddr *)&rpc_in6addr_loopback,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) 				sizeof(rpc_in6addr_loopback));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) 		err = -EAFNOSUPPORT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) 	if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) 		dprintk("RPC:       can't bind UDP socket (%d)\n", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) 		goto out_release;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) 	err = kernel_connect(sock, sap, salen, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) 	if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) 		dprintk("RPC:       can't connect UDP socket (%d)\n", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) 		goto out_release;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) 	err = kernel_getsockname(sock, buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) 	if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) 		dprintk("RPC:       getsockname failed (%d)\n", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) 		goto out_release;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) 	err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) 	if (buf->sa_family == AF_INET6) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) 		struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) 		sin6->sin6_scope_id = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) 	dprintk("RPC:       %s succeeded\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) out_release:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) 	sock_release(sock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397)  * Scraping a connected socket failed, so we don't have a usable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398)  * local address.  Fallback: generate an address that will prevent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399)  * the server from calling us back.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401)  * Returns zero and fills in "buf" if successful; otherwise, a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402)  * negative errno is returned.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) static int rpc_anyaddr(int family, struct sockaddr *buf, size_t buflen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) 	switch (family) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) 	case AF_INET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) 		if (buflen < sizeof(rpc_inaddr_loopback))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) 		memcpy(buf, &rpc_inaddr_loopback,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) 				sizeof(rpc_inaddr_loopback));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) 	case AF_INET6:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) 		if (buflen < sizeof(rpc_in6addr_loopback))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) 		memcpy(buf, &rpc_in6addr_loopback,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) 				sizeof(rpc_in6addr_loopback));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) 		dprintk("RPC:       %s: address family not supported\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) 			__func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) 		return -EAFNOSUPPORT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) 	dprintk("RPC:       %s: succeeded\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429)  * rpc_localaddr - discover local endpoint address for an RPC client
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430)  * @clnt: RPC client structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431)  * @buf: target buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432)  * @buflen: size of target buffer, in bytes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434)  * Returns zero and fills in "buf" and "buflen" if successful;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435)  * otherwise, a negative errno is returned.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437)  * This works even if the underlying transport is not currently connected,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438)  * or if the upper layer never previously provided a source address.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440)  * The result of this function call is transient: multiple calls in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441)  * succession may give different results, depending on how local
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442)  * networking configuration changes over time.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) int rpc_localaddr(struct rpc_clnt *clnt, struct sockaddr *buf, size_t buflen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) 	struct sockaddr_storage address;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) 	struct sockaddr *sap = (struct sockaddr *)&address;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) 	struct rpc_xprt *xprt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) 	struct net *net;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) 	size_t salen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) 	rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) 	xprt = rcu_dereference(clnt->cl_xprt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) 	salen = xprt->addrlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) 	memcpy(sap, &xprt->addr, salen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) 	net = get_net(xprt->xprt_net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) 	rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) 	rpc_set_port(sap, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) 	err = rpc_sockname(net, sap, salen, buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) 	put_net(net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) 	if (err != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) 		/* Couldn't discover local address, return ANYADDR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) 		return rpc_anyaddr(sap->sa_family, buf, buflen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) EXPORT_SYMBOL_GPL(rpc_localaddr);
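
/*
 * Editor's illustrative sketch, not part of the original clnt.c: the
 * caller provides a buffer large enough for any address family; if no
 * real source address can be probed, rpc_localaddr() falls back to the
 * ANY address for the transport's family.
 */
static int __maybe_unused
example_get_localaddr(struct rpc_clnt *clnt, struct sockaddr_storage *ss)
{
	return rpc_localaddr(clnt, (struct sockaddr *)ss, sizeof(*ss));
}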
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) rpc_setbufsize(struct rpc_clnt *clnt, unsigned int sndsize, unsigned int rcvsize)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) 	struct rpc_xprt *xprt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) 	rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) 	xprt = rcu_dereference(clnt->cl_xprt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) 	if (xprt->ops->set_buffer_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) 		xprt->ops->set_buffer_size(xprt, sndsize, rcvsize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) 	rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) EXPORT_SYMBOL_GPL(rpc_setbufsize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484)  * rpc_net_ns - Get the network namespace for this RPC client
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485)  * @clnt: RPC client to query
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) struct net *rpc_net_ns(struct rpc_clnt *clnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) 	struct net *ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) 	rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) 	ret = rcu_dereference(clnt->cl_xprt)->xprt_net;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) 	rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) EXPORT_SYMBOL_GPL(rpc_net_ns);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500)  * rpc_max_payload - Get maximum payload size for a transport, in bytes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501)  * @clnt: RPC client to query
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503)  * For stream transports, this is one RPC record fragment (see RFC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504)  * 1831), as we don't support multi-record requests yet.  For datagram
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505)  * transports, this is the size of an IP packet minus the IP, UDP, and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506)  * RPC header sizes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) size_t rpc_max_payload(struct rpc_clnt *clnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) 	size_t ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) 	rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) 	ret = rcu_dereference(clnt->cl_xprt)->max_payload;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) 	rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) EXPORT_SYMBOL_GPL(rpc_max_payload);
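
/*
 * Editor's illustrative sketch, not part of the original clnt.c: an upper
 * layer typically clamps its preferred I/O size to the transport's limit.
 * min_t() comes from the core kernel headers.
 */
static size_t __maybe_unused
example_clamp_iosize(struct rpc_clnt *clnt, size_t wanted)
{
	return min_t(size_t, wanted, rpc_max_payload(clnt));
}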
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520)  * rpc_max_bc_payload - Get maximum backchannel payload size, in bytes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521)  * @clnt: RPC client to query
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) size_t rpc_max_bc_payload(struct rpc_clnt *clnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) 	struct rpc_xprt *xprt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) 	size_t ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) 	rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) 	xprt = rcu_dereference(clnt->cl_xprt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) 	ret = xprt->ops->bc_maxpayload(xprt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) 	rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) EXPORT_SYMBOL_GPL(rpc_max_bc_payload);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) unsigned int rpc_num_bc_slots(struct rpc_clnt *clnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) 	struct rpc_xprt *xprt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) 	unsigned int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) 	rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) 	xprt = rcu_dereference(clnt->cl_xprt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) 	ret = xprt->ops->bc_num_slots(xprt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) 	rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) EXPORT_SYMBOL_GPL(rpc_num_bc_slots);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550)  * rpc_force_rebind - force transport to check that remote port is unchanged
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551)  * @clnt: client to rebind
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) void rpc_force_rebind(struct rpc_clnt *clnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) 	if (clnt->cl_autobind) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) 		rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) 		xprt_clear_bound(rcu_dereference(clnt->cl_xprt));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) 		rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) EXPORT_SYMBOL_GPL(rpc_force_rebind);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) __rpc_restart_call(struct rpc_task *task, void (*action)(struct rpc_task *))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) 	task->tk_status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) 	task->tk_rpc_status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) 	task->tk_action = action;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) 	return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574)  * Restart an (async) RPC call. Usually called from within the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575)  * exit handler.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) rpc_restart_call(struct rpc_task *task)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) 	return __rpc_restart_call(task, call_start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) EXPORT_SYMBOL_GPL(rpc_restart_call);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585)  * Restart an (async) RPC call from the call_prepare state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586)  * Usually called from within the exit handler.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) rpc_restart_call_prepare(struct rpc_task *task)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) 	if (task->tk_ops->rpc_call_prepare != NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) 		return __rpc_restart_call(task, rpc_prepare_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) 	return rpc_restart_call(task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) EXPORT_SYMBOL_GPL(rpc_restart_call_prepare);
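
/*
 * Minimal usage sketch (the callback name and the -EAGAIN trigger are
 * assumptions for illustration only): an rpc_call_done callback that
 * restarts the request from the call_prepare state on a transient
 * error instead of completing it.
 */
static void __maybe_unused example_call_done(struct rpc_task *task, void *calldata)
{
	if (task->tk_status == -EAGAIN && rpc_restart_call_prepare(task))
		return;
	/* normal completion handling would go here */
}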
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) const char
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) *rpc_proc_name(const struct rpc_task *task)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) 	const struct rpc_procinfo *proc = task->tk_msg.rpc_proc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) 	if (proc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) 		if (proc->p_name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) 			return proc->p_name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) 			return "NULL";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) 	} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) 		return "no proc";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) __rpc_call_rpcerror(struct rpc_task *task, int tk_status, int rpc_status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) 	trace_rpc_call_rpcerror(task, tk_status, rpc_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) 	task->tk_rpc_status = rpc_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) 	rpc_exit(task, tk_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) rpc_call_rpcerror(struct rpc_task *task, int status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) 	__rpc_call_rpcerror(task, status, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) 
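/*
 * Sketch of the typical forward path through the call states below.
 * Error paths can loop back to earlier states, and the bind/connect
 * states are skipped when the transport is already bound and connected:
 *
 *   call_start -> call_reserve -> call_reserveresult -> call_refresh
 *     -> call_refreshresult -> call_allocate -> call_encode
 *     [-> call_bind -> call_bind_status -> call_connect
 *      -> call_connect_status] -> call_transmit -> call_transmit_status
 *     -> call_status -> call_decode -> rpc_exit_task
 */
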
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626)  * 0.  Initial state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628)  *     Other FSM states can be visited zero or more times, but
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629)  *     this state is visited exactly once for each RPC.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) call_start(struct rpc_task *task)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) 	struct rpc_clnt	*clnt = task->tk_client;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) 	int idx = task->tk_msg.rpc_proc->p_statidx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) 	trace_rpc_request(task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) 	/* Increment call count (version might not be valid for ping) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) 	if (clnt->cl_program->version[clnt->cl_vers])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) 		clnt->cl_program->version[clnt->cl_vers]->counts[idx]++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) 	clnt->cl_stats->rpccnt++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) 	task->tk_action = call_reserve;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) 	rpc_task_set_transport(task, clnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648)  * 1.	Reserve an RPC call slot
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) call_reserve(struct rpc_task *task)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) 	task->tk_status  = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) 	task->tk_action  = call_reserveresult;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) 	xprt_reserve(task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) static void call_retry_reserve(struct rpc_task *task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661)  * 1b.	Grok the result of xprt_reserve()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) call_reserveresult(struct rpc_task *task)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) 	int status = task->tk_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) 	 * After a call to xprt_reserve(), we must have either
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) 	 * a request slot or else an error status.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) 	task->tk_status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) 	if (status >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) 		if (task->tk_rqstp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) 			task->tk_action = call_refresh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) 		rpc_call_rpcerror(task, -EIO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) 	switch (status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) 	case -ENOMEM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) 		rpc_delay(task, HZ >> 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) 		fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) 	case -EAGAIN:	/* woken up; retry */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) 		task->tk_action = call_retry_reserve;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) 		rpc_call_rpcerror(task, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696)  * 1c.	Retry reserving an RPC call slot
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) call_retry_reserve(struct rpc_task *task)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) 	task->tk_status  = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) 	task->tk_action  = call_reserveresult;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) 	xprt_retry_reserve(task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707)  * 2.	Bind and/or refresh the credentials
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) call_refresh(struct rpc_task *task)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) 	task->tk_action = call_refreshresult;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) 	task->tk_status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) 	task->tk_client->cl_stats->rpcauthrefresh++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) 	rpcauth_refreshcred(task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719)  * 2a.	Process the results of a credential refresh
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) call_refreshresult(struct rpc_task *task)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) 	int status = task->tk_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) 	task->tk_status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) 	task->tk_action = call_refresh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) 	switch (status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) 	case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) 		if (rpcauth_uptodatecred(task)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) 			task->tk_action = call_allocate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) 		/* Use rate-limiting and a max number of retries if refresh
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) 		 * had status 0 but failed to update the cred.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) 		fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) 	case -ETIMEDOUT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) 		rpc_delay(task, 3*HZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) 		fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) 	case -EAGAIN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) 		status = -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) 		fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) 	case -EKEYEXPIRED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) 		if (!task->tk_cred_retry)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) 		task->tk_cred_retry--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) 		trace_rpc_retry_refresh_status(task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) 	trace_rpc_refresh_status(task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) 	rpc_call_rpcerror(task, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756)  * 2b.	Allocate the buffer. For details, see sched.c:rpc_malloc.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757)  *	(Note: buffer memory is freed in xprt_release).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) call_allocate(struct rpc_task *task)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) 	const struct rpc_auth *auth = task->tk_rqstp->rq_cred->cr_auth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) 	struct rpc_rqst *req = task->tk_rqstp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) 	struct rpc_xprt *xprt = req->rq_xprt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) 	const struct rpc_procinfo *proc = task->tk_msg.rpc_proc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) 	int status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) 	task->tk_status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) 	task->tk_action = call_encode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) 	if (req->rq_buffer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) 	if (proc->p_proc != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) 		BUG_ON(proc->p_arglen == 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) 		if (proc->p_decode != NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) 			BUG_ON(proc->p_replen == 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) 	 * Calculate the size (in quads) of the RPC call
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) 	 * and reply headers, and convert both values
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) 	 * to byte sizes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) 	req->rq_callsize = RPC_CALLHDRSIZE + (auth->au_cslack << 1) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) 			   proc->p_arglen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) 	req->rq_callsize <<= 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) 	 * Note: the reply buffer must at minimum allocate enough space
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) 	 * for the 'struct accepted_reply' from RFC5531.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) 	req->rq_rcvsize = RPC_REPHDRSIZE + auth->au_rslack +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) 			max_t(size_t, proc->p_replen, 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) 	req->rq_rcvsize <<= 2;
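	/*
	 * Worked example (values assumed purely for illustration): with
	 * au_cslack = 10 and p_arglen = 20 XDR words, rq_callsize is
	 * (RPC_CALLHDRSIZE + 2 * 10 + 20) words, shifted left by two to
	 * convert 4-byte XDR words to bytes; rq_rcvsize is derived the
	 * same way from au_rslack and p_replen.
	 */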
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) 	status = xprt->ops->buf_alloc(task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) 	trace_rpc_buf_alloc(task, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) 	if (status == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) 	if (status != -ENOMEM) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) 		rpc_call_rpcerror(task, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) 	if (RPC_IS_ASYNC(task) || !fatal_signal_pending(current)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) 		task->tk_action = call_allocate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) 		rpc_delay(task, HZ>>4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) 	rpc_call_rpcerror(task, -ERESTARTSYS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) 
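/*
 * A request needs (re-)encoding only while RPC_TASK_NEED_XMIT is not
 * yet set and it either has never been sent, still uses ordinary
 * retransmit timeouts, or the transport reports that a retransmission
 * is required.
 */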
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) rpc_task_need_encode(struct rpc_task *task)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) 	return test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate) == 0 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) 		(!(task->tk_flags & RPC_TASK_SENT) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) 		 !(task->tk_flags & RPC_TASK_NO_RETRANS_TIMEOUT) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) 		 xprt_request_need_retransmit(task));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) rpc_xdr_encode(struct rpc_task *task)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) 	struct rpc_rqst	*req = task->tk_rqstp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) 	struct xdr_stream xdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) 	xdr_buf_init(&req->rq_snd_buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) 		     req->rq_buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) 		     req->rq_callsize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) 	xdr_buf_init(&req->rq_rcv_buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) 		     req->rq_rbuffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) 		     req->rq_rcvsize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) 	req->rq_reply_bytes_recvd = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) 	req->rq_snd_buf.head[0].iov_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) 	xdr_init_encode(&xdr, &req->rq_snd_buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) 			req->rq_snd_buf.head[0].iov_base, req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) 	xdr_free_bvec(&req->rq_snd_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) 	if (rpc_encode_header(task, &xdr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) 	task->tk_status = rpcauth_wrap_req(task, &xdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848)  * 3.	Encode arguments of an RPC call
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) call_encode(struct rpc_task *task)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) 	if (!rpc_task_need_encode(task))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) 	/* Dequeue task from the receive queue while we're encoding */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) 	xprt_request_dequeue_xprt(task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) 	/* Encode here so that rpcsec_gss can use the correct sequence number. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) 	rpc_xdr_encode(task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) 	/* Did the encode result in an error condition? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) 	if (task->tk_status != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) 		/* Was the error nonfatal? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) 		switch (task->tk_status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) 		case -EAGAIN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) 		case -ENOMEM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) 			rpc_delay(task, HZ >> 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) 		case -EKEYEXPIRED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) 			if (!task->tk_cred_retry) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) 				rpc_exit(task, task->tk_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) 			} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) 				task->tk_action = call_refresh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) 				task->tk_cred_retry--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) 				trace_rpc_retry_refresh_status(task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) 			rpc_call_rpcerror(task, task->tk_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) 	/* Add task to reply queue before transmission to avoid races */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) 	if (rpc_reply_expected(task))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) 		xprt_request_enqueue_receive(task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) 	xprt_request_enqueue_transmit(task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) 	task->tk_action = call_transmit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) 	/* Check that the connection is OK */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) 	if (!xprt_bound(task->tk_xprt))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) 		task->tk_action = call_bind;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) 	else if (!xprt_connected(task->tk_xprt))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) 		task->tk_action = call_connect;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897)  * Helpers to check if the task was already transmitted, and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898)  * to take action when that is the case.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) static bool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) rpc_task_transmitted(struct rpc_task *task)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) 	return !test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) rpc_task_handle_transmitted(struct rpc_task *task)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) 	xprt_end_transmit(task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) 	task->tk_action = call_transmit_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914)  * 4.	Get the server port number if not yet set
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) call_bind(struct rpc_task *task)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) 	struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) 	if (rpc_task_transmitted(task)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) 		rpc_task_handle_transmitted(task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) 	if (xprt_bound(xprt)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) 		task->tk_action = call_connect;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) 	task->tk_action = call_bind_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) 	if (!xprt_prepare_transmit(task))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) 	xprt->ops->rpcbind(task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939)  * 4a.	Sort out bind result
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) call_bind_status(struct rpc_task *task)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) 	struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) 	int status = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) 	if (rpc_task_transmitted(task)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) 		rpc_task_handle_transmitted(task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) 	if (task->tk_status >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) 		goto out_next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) 	if (xprt_bound(xprt)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) 		task->tk_status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) 		goto out_next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) 	switch (task->tk_status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) 	case -ENOMEM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) 		rpc_delay(task, HZ >> 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) 		goto retry_timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) 	case -EACCES:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) 		trace_rpcb_prog_unavail_err(task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) 		/* fail immediately if this is an RPC ping */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) 		if (task->tk_msg.rpc_proc->p_proc == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) 			status = -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) 		if (task->tk_rebind_retry == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) 		task->tk_rebind_retry--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) 		rpc_delay(task, 3*HZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) 		goto retry_timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) 	case -ENOBUFS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) 		rpc_delay(task, HZ >> 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) 		goto retry_timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) 	case -EAGAIN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) 		goto retry_timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) 	case -ETIMEDOUT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) 		trace_rpcb_timeout_err(task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) 		goto retry_timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) 	case -EPFNOSUPPORT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) 		/* server doesn't support any rpcbind version we know of */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) 		trace_rpcb_bind_version_err(task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) 	case -EPROTONOSUPPORT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) 		trace_rpcb_bind_version_err(task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) 		goto retry_timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) 	case -ECONNREFUSED:		/* connection problems */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) 	case -ECONNRESET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) 	case -ECONNABORTED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) 	case -ENOTCONN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) 	case -EHOSTDOWN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) 	case -ENETDOWN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) 	case -EHOSTUNREACH:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) 	case -ENETUNREACH:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) 	case -EPIPE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) 		trace_rpcb_unreachable_err(task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) 		if (!RPC_IS_SOFTCONN(task)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) 			rpc_delay(task, 5*HZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) 			goto retry_timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) 		status = task->tk_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) 		trace_rpcb_unrecognized_err(task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) 	rpc_call_rpcerror(task, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) 	return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) out_next:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) 	task->tk_action = call_connect;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) 	return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) retry_timeout:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) 	task->tk_status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) 	task->tk_action = call_bind;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) 	rpc_check_timeout(task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022)  * 4b.	Connect to the RPC server
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) call_connect(struct rpc_task *task)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) 	struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) 	if (rpc_task_transmitted(task)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) 		rpc_task_handle_transmitted(task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) 	if (xprt_connected(xprt)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) 		task->tk_action = call_transmit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) 	task->tk_action = call_connect_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) 	if (task->tk_status < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) 	if (task->tk_flags & RPC_TASK_NOCONNECT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) 		rpc_call_rpcerror(task, -ENOTCONN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) 	if (!xprt_prepare_transmit(task))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) 	xprt_connect(task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052)  * 4c.	Sort out connect result
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) call_connect_status(struct rpc_task *task)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) 	struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) 	struct rpc_clnt *clnt = task->tk_client;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) 	int status = task->tk_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) 	if (rpc_task_transmitted(task)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) 		rpc_task_handle_transmitted(task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) 	trace_rpc_connect_status(task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) 	if (task->tk_status == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) 		clnt->cl_stats->netreconn++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) 		goto out_next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) 	if (xprt_connected(xprt)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) 		task->tk_status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) 		goto out_next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) 	task->tk_status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) 	switch (status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) 	case -ECONNREFUSED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) 		/* A positive refusal suggests a rebind is needed. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) 		if (RPC_IS_SOFTCONN(task))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) 		if (clnt->cl_autobind) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) 			rpc_force_rebind(clnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) 			goto out_retry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) 		fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) 	case -ECONNRESET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) 	case -ECONNABORTED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) 	case -ENETDOWN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) 	case -ENETUNREACH:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) 	case -EHOSTUNREACH:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) 	case -EPIPE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) 	case -EPROTO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) 		xprt_conditional_disconnect(task->tk_rqstp->rq_xprt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) 					    task->tk_rqstp->rq_connect_cookie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) 		if (RPC_IS_SOFTCONN(task))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) 		/* retry with existing socket, after a delay */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) 		rpc_delay(task, 3*HZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) 		fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) 	case -EADDRINUSE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) 	case -ENOTCONN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) 	case -EAGAIN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) 	case -ETIMEDOUT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) 		goto out_retry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) 	case -ENOBUFS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) 		rpc_delay(task, HZ >> 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) 		goto out_retry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) 	rpc_call_rpcerror(task, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) 	return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) out_next:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) 	task->tk_action = call_transmit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) 	return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) out_retry:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) 	/* Check for timeouts before looping back to call_bind */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) 	task->tk_action = call_bind;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) 	rpc_check_timeout(task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123)  * 5.	Transmit the RPC request, and wait for reply
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) call_transmit(struct rpc_task *task)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) 	if (rpc_task_transmitted(task)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) 		rpc_task_handle_transmitted(task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) 	task->tk_action = call_transmit_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) 	if (!xprt_prepare_transmit(task))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) 	task->tk_status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) 	if (test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) 		if (!xprt_connected(task->tk_xprt)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) 			task->tk_status = -ENOTCONN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) 		xprt_transmit(task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) 	xprt_end_transmit(task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148)  * 5a.	Handle cleanup after a transmission
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) call_transmit_status(struct rpc_task *task)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) 	task->tk_action = call_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) 	 * Common case: success.  Force the compiler to put this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) 	 * test first.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) 	if (rpc_task_transmitted(task)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) 		task->tk_status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) 		xprt_request_wait_receive(task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) 	switch (task->tk_status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) 	case -EBADMSG:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) 		task->tk_status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) 		task->tk_action = call_encode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) 		 * Special cases: if we've been waiting on the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) 		 * socket's write_space() callback, or if the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) 		 * socket just returned a connection error,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) 		 * then hold onto the transport lock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) 	case -ENOBUFS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) 		rpc_delay(task, HZ>>2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) 		fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) 	case -EBADSLT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) 	case -EAGAIN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) 		task->tk_action = call_transmit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) 		task->tk_status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) 	case -ECONNREFUSED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) 	case -EHOSTDOWN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) 	case -ENETDOWN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) 	case -EHOSTUNREACH:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) 	case -ENETUNREACH:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) 	case -EPERM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) 		if (RPC_IS_SOFTCONN(task)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) 			if (!task->tk_msg.rpc_proc->p_proc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) 				trace_xprt_ping(task->tk_xprt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) 						task->tk_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) 			rpc_call_rpcerror(task, task->tk_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) 		fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) 	case -ECONNRESET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) 	case -ECONNABORTED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) 	case -EADDRINUSE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) 	case -ENOTCONN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) 	case -EPIPE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) 		task->tk_action = call_bind;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) 		task->tk_status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) 	rpc_check_timeout(task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) #if defined(CONFIG_SUNRPC_BACKCHANNEL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) static void call_bc_transmit(struct rpc_task *task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) static void call_bc_transmit_status(struct rpc_task *task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) call_bc_encode(struct rpc_task *task)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) 	xprt_request_enqueue_transmit(task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) 	task->tk_action = call_bc_transmit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224)  * 5b.	Send the backchannel RPC reply.  On error, drop the reply.  In
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225)  * addition, disconnect on connectivity errors.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) call_bc_transmit(struct rpc_task *task)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) 	task->tk_action = call_bc_transmit_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) 	if (test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) 		if (!xprt_prepare_transmit(task))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) 		task->tk_status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) 		xprt_transmit(task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) 	xprt_end_transmit(task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) call_bc_transmit_status(struct rpc_task *task)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) 	struct rpc_rqst *req = task->tk_rqstp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) 	if (rpc_task_transmitted(task))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) 		task->tk_status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) 	switch (task->tk_status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) 	case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) 		/* Success */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) 	case -ENETDOWN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) 	case -EHOSTDOWN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) 	case -EHOSTUNREACH:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) 	case -ENETUNREACH:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) 	case -ECONNRESET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) 	case -ECONNREFUSED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) 	case -EADDRINUSE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) 	case -ENOTCONN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) 	case -EPIPE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) 	case -ENOBUFS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) 		rpc_delay(task, HZ>>2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) 		fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) 	case -EBADSLT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) 	case -EAGAIN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) 		task->tk_status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) 		task->tk_action = call_bc_transmit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) 	case -ETIMEDOUT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) 		 * Problem reaching the server.  Disconnect and let the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) 		 * forechannel reestablish the connection.  The server will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) 		 * have to retransmit the backchannel request and we'll
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) 		 * reprocess it.  Since these ops are idempotent, there's no
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) 		 * need to cache our reply at this time.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) 		printk(KERN_NOTICE "RPC: Could not send backchannel reply "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) 			"error: %d\n", task->tk_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) 		xprt_conditional_disconnect(req->rq_xprt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) 			req->rq_connect_cookie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) 		 * We were unable to reply and will have to drop the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) 		 * request.  The server should reconnect and retransmit.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) 		printk(KERN_NOTICE "RPC: Could not send backchannel reply "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) 			"error: %d\n", task->tk_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) 	task->tk_action = rpc_exit_task;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) #endif /* CONFIG_SUNRPC_BACKCHANNEL */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296)  * 6.	Sort out the RPC call status
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) call_status(struct rpc_task *task)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) 	struct rpc_clnt	*clnt = task->tk_client;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) 	int		status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) 	if (!task->tk_msg.rpc_proc->p_proc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) 		trace_xprt_ping(task->tk_xprt, task->tk_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) 	status = task->tk_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) 	if (status >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) 		task->tk_action = call_decode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) 	trace_rpc_call_status(task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) 	task->tk_status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) 	switch (status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) 	case -EHOSTDOWN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) 	case -ENETDOWN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) 	case -EHOSTUNREACH:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) 	case -ENETUNREACH:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) 	case -EPERM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) 		if (RPC_IS_SOFTCONN(task))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) 			goto out_exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) 		 * Delay any retries for 3 seconds, then handle as if it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) 		 * were a timeout.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) 		rpc_delay(task, 3*HZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) 		fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) 	case -ETIMEDOUT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) 	case -ECONNREFUSED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) 	case -ECONNRESET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) 	case -ECONNABORTED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) 	case -ENOTCONN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) 		rpc_force_rebind(clnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) 	case -EADDRINUSE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) 		rpc_delay(task, 3*HZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) 		fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) 	case -EPIPE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) 	case -EAGAIN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) 	case -EIO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) 		/* shutdown or soft timeout */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) 		goto out_exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) 		if (clnt->cl_chatty)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) 			printk("%s: RPC call returned error %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) 			       clnt->cl_program->name, -status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) 		goto out_exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) 	task->tk_action = call_encode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) 	if (status != -ECONNRESET && status != -ECONNABORTED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) 		rpc_check_timeout(task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) 	return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) out_exit:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) 	rpc_call_rpcerror(task, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) static bool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) rpc_check_connected(const struct rpc_rqst *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) 	/* No allocated request or transport? return true */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) 	if (!req || !req->rq_xprt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) 	return xprt_connected(req->rq_xprt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) 
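/*
 * Handle a major timeout: SOFTCONN tasks fail with -ETIMEDOUT once the
 * transport is disconnected, SOFT tasks fail when the major timeout
 * expires (unless RPC_TASK_NO_RETRANS_TIMEOUT applies and the transport
 * is still connected), and hard tasks log a "not responding" notice,
 * force a rebind and keep retrying.
 */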
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) rpc_check_timeout(struct rpc_task *task)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) 	struct rpc_clnt	*clnt = task->tk_client;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) 	if (RPC_SIGNALLED(task)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) 		rpc_call_rpcerror(task, -ERESTARTSYS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) 	if (xprt_adjust_timeout(task->tk_rqstp) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) 	trace_rpc_timeout_status(task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) 	task->tk_timeouts++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) 	if (RPC_IS_SOFTCONN(task) && !rpc_check_connected(task->tk_rqstp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) 		rpc_call_rpcerror(task, -ETIMEDOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) 	if (RPC_IS_SOFT(task)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) 		 * Once a "no retrans timeout" soft task (a.k.a. NFSv4) has
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) 		 * been sent, it should time out only if the transport
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) 		 * connection gets terminally broken.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) 		if ((task->tk_flags & RPC_TASK_NO_RETRANS_TIMEOUT) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) 		    rpc_check_connected(task->tk_rqstp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) 		if (clnt->cl_chatty) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) 			pr_notice_ratelimited(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) 				"%s: server %s not responding, timed out\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) 				clnt->cl_program->name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) 				task->tk_xprt->servername);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) 		if (task->tk_flags & RPC_TASK_TIMEOUT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) 			rpc_call_rpcerror(task, -ETIMEDOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) 			__rpc_call_rpcerror(task, -EIO, -ETIMEDOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) 	if (!(task->tk_flags & RPC_CALL_MAJORSEEN)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) 		task->tk_flags |= RPC_CALL_MAJORSEEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) 		if (clnt->cl_chatty) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) 			pr_notice_ratelimited(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) 				"%s: server %s not responding, still trying\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) 				clnt->cl_program->name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) 				task->tk_xprt->servername);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) 	rpc_force_rebind(clnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) 	 * Did our request time out due to an RPCSEC_GSS out-of-sequence
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) 	 * event? RFC2203 requires the server to drop all such requests.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) 	rpcauth_invalcred(task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) }
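
/*
 * Editorial note (not part of the original clnt.c): the timeout policy
 * implemented above is selected by the RPC_TASK_* flags the caller passes
 * when the call is issued.  A minimal, hypothetical "soft" synchronous
 * call, which gives up and reports a timeout instead of retransmitting
 * forever, might look like this:
 */
#if 0	/* illustrative sketch only */
static int example_soft_call(struct rpc_clnt *clnt, struct rpc_message *msg)
{
	/*
	 * RPC_TASK_SOFT: take the RPC_IS_SOFT() branch above and fail the
	 * call after the major timeout instead of retrying indefinitely.
	 * RPC_TASK_TIMEOUT: report -ETIMEDOUT rather than -EIO on failure.
	 */
	return rpc_call_sync(clnt, msg, RPC_TASK_SOFT | RPC_TASK_TIMEOUT);
}
#endif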
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431)  * 7.	Decode the RPC reply
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) call_decode(struct rpc_task *task)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) 	struct rpc_clnt	*clnt = task->tk_client;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437) 	struct rpc_rqst	*req = task->tk_rqstp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) 	struct xdr_stream xdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) 	if (!task->tk_msg.rpc_proc->p_decode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) 		task->tk_action = rpc_exit_task;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) 	if (task->tk_flags & RPC_CALL_MAJORSEEN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) 		if (clnt->cl_chatty) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) 			pr_notice_ratelimited("%s: server %s OK\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) 				clnt->cl_program->name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) 				task->tk_xprt->servername);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) 		task->tk_flags &= ~RPC_CALL_MAJORSEEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) 	 * Did we ever call xprt_complete_rqst()? If not, we should assume
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) 	 * the message is incomplete.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) 	err = -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) 	if (!req->rq_reply_bytes_recvd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) 	/* Ensure that we see all writes made by xprt_complete_rqst()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) 	 * before it changed req->rq_reply_bytes_recvd.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) 	smp_rmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) 	req->rq_rcv_buf.len = req->rq_private_buf.len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) 	trace_rpc_xdr_recvfrom(task, &req->rq_rcv_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) 	/* Check that the softirq receive buffer is valid */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) 	WARN_ON(memcmp(&req->rq_rcv_buf, &req->rq_private_buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) 				sizeof(req->rq_rcv_buf)) != 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) 	xdr_init_decode(&xdr, &req->rq_rcv_buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) 			req->rq_rcv_buf.head[0].iov_base, req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) 	err = rpc_decode_header(task, &xdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) 	switch (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) 	case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) 		task->tk_action = rpc_exit_task;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) 		task->tk_status = rpcauth_unwrap_resp(task, &xdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) 	case -EAGAIN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) 		task->tk_status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) 		if (task->tk_client->cl_discrtry)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) 			xprt_conditional_disconnect(req->rq_xprt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) 						    req->rq_connect_cookie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) 		task->tk_action = call_encode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) 		rpc_check_timeout(task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) 	case -EKEYREJECTED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) 		task->tk_action = call_reserve;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) 		rpc_check_timeout(task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) 		rpcauth_invalcred(task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) 		/* Ensure we obtain a new XID if we retry! */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) 		xprt_release(task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) rpc_encode_header(struct rpc_task *task, struct xdr_stream *xdr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) 	struct rpc_clnt *clnt = task->tk_client;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) 	struct rpc_rqst	*req = task->tk_rqstp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) 	__be32 *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) 	int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509) 	error = -EMSGSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) 	p = xdr_reserve_space(xdr, RPC_CALLHDRSIZE << 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) 	if (!p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) 		goto out_fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) 	*p++ = req->rq_xid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) 	*p++ = rpc_call;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) 	*p++ = cpu_to_be32(RPC_VERSION);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) 	*p++ = cpu_to_be32(clnt->cl_prog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) 	*p++ = cpu_to_be32(clnt->cl_vers);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) 	*p   = cpu_to_be32(task->tk_msg.rpc_proc->p_proc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) 	error = rpcauth_marshcred(task, xdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) 	if (error < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) 		goto out_fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524) out_fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) 	trace_rpc_bad_callhdr(task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) 	rpc_call_rpcerror(task, error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) 	return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528) }
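
/*
 * Editorial note (not part of the original clnt.c): the six 32-bit words
 * reserved above (RPC_CALLHDRSIZE << 2 bytes) form the fixed part of an
 * RPC call header, encoded as big-endian XDR words:
 *
 *	word 0:	XID			(req->rq_xid)
 *	word 1:	message type CALL	(rpc_call)
 *	word 2:	RPC version		(RPC_VERSION, i.e. 2)
 *	word 3:	program number		(clnt->cl_prog)
 *	word 4:	program version		(clnt->cl_vers)
 *	word 5:	procedure number	(rpc_proc->p_proc)
 *
 * The credential and verifier that complete the header are appended by
 * rpcauth_marshcred().
 */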
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) static noinline int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) rpc_decode_header(struct rpc_task *task, struct xdr_stream *xdr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) 	struct rpc_clnt *clnt = task->tk_client;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) 	int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) 	__be32 *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) 	/* RFC-1014 says that the representation of XDR data must be a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538) 	 * multiple of four bytes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539) 	 * - if it isn't, pointer subtraction in the NFS client may give
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) 	 *   undefined results
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) 	if (task->tk_rqstp->rq_rcv_buf.len & 3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) 		goto out_unparsable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545) 	p = xdr_inline_decode(xdr, 3 * sizeof(*p));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) 	if (!p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) 		goto out_unparsable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) 	p++;	/* skip XID */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) 	if (*p++ != rpc_reply)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) 		goto out_unparsable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) 	if (*p++ != rpc_msg_accepted)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) 		goto out_msg_denied;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) 	error = rpcauth_checkverf(task, xdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) 	if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) 		goto out_verifier;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) 	p = xdr_inline_decode(xdr, sizeof(*p));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559) 	if (!p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) 		goto out_unparsable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) 	switch (*p) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) 	case rpc_success:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564) 	case rpc_prog_unavail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) 		trace_rpc__prog_unavail(task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566) 		error = -EPFNOSUPPORT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) 		goto out_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) 	case rpc_prog_mismatch:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) 		trace_rpc__prog_mismatch(task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570) 		error = -EPROTONOSUPPORT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) 		goto out_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572) 	case rpc_proc_unavail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) 		trace_rpc__proc_unavail(task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) 		error = -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575) 		goto out_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) 	case rpc_garbage_args:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577) 	case rpc_system_err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) 		trace_rpc__garbage_args(task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) 		error = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582) 		goto out_unparsable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) out_garbage:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586) 	clnt->cl_stats->rpcgarbage++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) 	if (task->tk_garb_retry) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588) 		task->tk_garb_retry--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) 		task->tk_action = call_encode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590) 		return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592) out_err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) 	rpc_call_rpcerror(task, error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594) 	return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596) out_unparsable:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597) 	trace_rpc__unparsable(task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) 	error = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) 	goto out_garbage;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601) out_verifier:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602) 	trace_rpc_bad_verifier(task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) 	goto out_garbage;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) out_msg_denied:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606) 	error = -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607) 	p = xdr_inline_decode(xdr, sizeof(*p));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608) 	if (!p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609) 		goto out_unparsable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610) 	switch (*p++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611) 	case rpc_auth_error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613) 	case rpc_mismatch:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614) 		trace_rpc__mismatch(task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615) 		error = -EPROTONOSUPPORT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) 		goto out_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618) 		goto out_unparsable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621) 	p = xdr_inline_decode(xdr, sizeof(*p));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622) 	if (!p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623) 		goto out_unparsable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624) 	switch (*p++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625) 	case rpc_autherr_rejectedcred:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626) 	case rpc_autherr_rejectedverf:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627) 	case rpcsec_gsserr_credproblem:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628) 	case rpcsec_gsserr_ctxproblem:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629) 		if (!task->tk_cred_retry)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631) 		task->tk_cred_retry--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632) 		trace_rpc__stale_creds(task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633) 		return -EKEYREJECTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634) 	case rpc_autherr_badcred:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635) 	case rpc_autherr_badverf:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636) 		/* possibly garbled cred/verf? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637) 		if (!task->tk_garb_retry)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639) 		task->tk_garb_retry--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) 		trace_rpc__bad_creds(task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641) 		task->tk_action = call_encode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) 		return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) 	case rpc_autherr_tooweak:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644) 		trace_rpc__auth_tooweak(task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645) 		pr_warn("RPC: server %s requires stronger authentication.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646) 			task->tk_xprt->servername);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649) 		goto out_unparsable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651) 	goto out_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652) }
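
/*
 * Editorial note (not part of the original clnt.c): summary of how the
 * decoder above maps RPC-level reply status to local errnos:
 *
 *	accepted replies
 *	  prog_unavail			-EPFNOSUPPORT
 *	  prog_mismatch			-EPROTONOSUPPORT
 *	  proc_unavail			-EOPNOTSUPP
 *	  garbage_args / system_err	re-encode and retry while
 *					tk_garb_retry > 0, else -EIO
 *	denied replies
 *	  rpc_mismatch			-EPROTONOSUPPORT
 *	  auth errors (stale cred/ctx)	-EKEYREJECTED while tk_cred_retry > 0
 *	  auth errors (bad cred/verf)	re-encode and retry while
 *					tk_garb_retry > 0
 *	  other auth errors		-EACCES
 *
 * An unparsable reply is handled like garbage args: retried, then -EIO.
 */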
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654) static void rpcproc_encode_null(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655) 		const void *obj)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659) static int rpcproc_decode_null(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660) 		void *obj)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665) static const struct rpc_procinfo rpcproc_null = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666) 	.p_encode = rpcproc_encode_null,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667) 	.p_decode = rpcproc_decode_null,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670) static int rpc_ping(struct rpc_clnt *clnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672) 	struct rpc_message msg = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673) 		.rpc_proc = &rpcproc_null,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676) 	err = rpc_call_sync(clnt, &msg, RPC_TASK_SOFT | RPC_TASK_SOFTCONN |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677) 			    RPC_TASK_NULLCREDS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679) }
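
/*
 * Editorial note (not part of the original clnt.c): rpc_ping() sends
 * procedure 0 (the NULL procedure) with no credentials as a cheap
 * "is the server reachable?" probe.  The client-creation path earlier in
 * this file issues it automatically unless the caller requested
 * RPC_CLNT_CREATE_NOPING.
 */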
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681) static
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682) struct rpc_task *rpc_call_null_helper(struct rpc_clnt *clnt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683) 		struct rpc_xprt *xprt, struct rpc_cred *cred, int flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684) 		const struct rpc_call_ops *ops, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686) 	struct rpc_message msg = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687) 		.rpc_proc = &rpcproc_null,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689) 	struct rpc_task_setup task_setup_data = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690) 		.rpc_client = clnt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691) 		.rpc_xprt = xprt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692) 		.rpc_message = &msg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693) 		.rpc_op_cred = cred,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694) 		.callback_ops = (ops != NULL) ? ops : &rpc_default_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695) 		.callback_data = data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696) 		.flags = flags | RPC_TASK_SOFT | RPC_TASK_SOFTCONN |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697) 			 RPC_TASK_NULLCREDS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700) 	return rpc_run_task(&task_setup_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703) struct rpc_task *rpc_call_null(struct rpc_clnt *clnt, struct rpc_cred *cred, int flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705) 	return rpc_call_null_helper(clnt, NULL, cred, flags, NULL, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707) EXPORT_SYMBOL_GPL(rpc_call_null);
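
/*
 * Editorial note (not part of the original clnt.c): a minimal,
 * hypothetical use of rpc_call_null() -- fire an asynchronous NULL call
 * at the client's current transport and drop the task reference.  For a
 * synchronous probe, omit RPC_TASK_ASYNC and check task->tk_status
 * before calling rpc_put_task().
 */
#if 0	/* illustrative sketch only */
static int example_null_ping(struct rpc_clnt *clnt)
{
	struct rpc_task *task = rpc_call_null(clnt, NULL, RPC_TASK_ASYNC);

	if (IS_ERR(task))
		return PTR_ERR(task);
	rpc_put_task(task);
	return 0;
}
#endif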
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709) struct rpc_cb_add_xprt_calldata {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710) 	struct rpc_xprt_switch *xps;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711) 	struct rpc_xprt *xprt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714) static void rpc_cb_add_xprt_done(struct rpc_task *task, void *calldata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716) 	struct rpc_cb_add_xprt_calldata *data = calldata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718) 	if (task->tk_status == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719) 		rpc_xprt_switch_add_xprt(data->xps, data->xprt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722) static void rpc_cb_add_xprt_release(void *calldata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724) 	struct rpc_cb_add_xprt_calldata *data = calldata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726) 	xprt_put(data->xprt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727) 	xprt_switch_put(data->xps);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728) 	kfree(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731) static const struct rpc_call_ops rpc_cb_add_xprt_call_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732) 	.rpc_call_done = rpc_cb_add_xprt_done,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733) 	.rpc_release = rpc_cb_add_xprt_release,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737)  * rpc_clnt_test_and_add_xprt - Test and add a new transport to a rpc_clnt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738)  * @clnt: pointer to struct rpc_clnt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739)  * @xps: pointer to struct rpc_xprt_switch
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740)  * @xprt: pointer to struct rpc_xprt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741)  * @dummy: unused
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743) int rpc_clnt_test_and_add_xprt(struct rpc_clnt *clnt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744) 		struct rpc_xprt_switch *xps, struct rpc_xprt *xprt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745) 		void *dummy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747) 	struct rpc_cb_add_xprt_calldata *data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748) 	struct rpc_task *task;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750) 	data = kmalloc(sizeof(*data), GFP_NOFS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751) 	if (!data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753) 	data->xps = xprt_switch_get(xps);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754) 	data->xprt = xprt_get(xprt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755) 	if (rpc_xprt_switch_has_addr(data->xps, (struct sockaddr *)&xprt->addr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756) 		rpc_cb_add_xprt_release(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757) 		goto success;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760) 	task = rpc_call_null_helper(clnt, xprt, NULL, RPC_TASK_ASYNC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761) 			&rpc_cb_add_xprt_call_ops, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763) 	rpc_put_task(task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764) success:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2765) 	return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2766) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767) EXPORT_SYMBOL_GPL(rpc_clnt_test_and_add_xprt);
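
/*
 * Editorial note (not part of the original clnt.c): this function matches
 * the setup() signature expected by rpc_clnt_add_xprt() below, so it can
 * be passed as the @setup argument to add a candidate transport only
 * after an asynchronous NULL call to it has completed successfully.
 */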
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770)  * rpc_clnt_setup_test_and_add_xprt()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772)  * This is an rpc_clnt_add_xprt() setup function which returns 1 so that:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773)  *   1) the caller of the test function must dereference the rpc_xprt_switch
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774)  *   and the rpc_xprt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2775)  *   2) the test function must call rpc_xprt_switch_add_xprt(), usually in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2776)  *   its rpc_call_done routine.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2777)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778)  * Upon success (return of 1), the test function adds the new
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2779)  * transport to the rpc_clnt xprt switch.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2780)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2781)  * @clnt: struct rpc_clnt to get the new transport
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2782)  * @xps:  the rpc_xprt_switch to hold the new transport
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783)  * @xprt: the rpc_xprt to test
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2784)  * @data: a struct rpc_add_xprt_test pointer that holds the test function
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2785)  *        and test function call data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2786)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787) int rpc_clnt_setup_test_and_add_xprt(struct rpc_clnt *clnt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788) 				     struct rpc_xprt_switch *xps,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789) 				     struct rpc_xprt *xprt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790) 				     void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2792) 	struct rpc_task *task;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2793) 	struct rpc_add_xprt_test *xtest = (struct rpc_add_xprt_test *)data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2794) 	int status = -EADDRINUSE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2795) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2796) 	xprt = xprt_get(xprt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2797) 	xprt_switch_get(xps);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2798) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2799) 	if (rpc_xprt_switch_has_addr(xps, (struct sockaddr *)&xprt->addr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2800) 		goto out_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2801) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2802) 	/* Test the connection */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2803) 	task = rpc_call_null_helper(clnt, xprt, NULL, 0, NULL, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2804) 	if (IS_ERR(task)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2805) 		status = PTR_ERR(task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2806) 		goto out_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2807) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2808) 	status = task->tk_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2809) 	rpc_put_task(task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2810) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2811) 	if (status < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2812) 		goto out_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2813) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2814) 	/* rpc_xprt_switch and rpc_xprt are dereferenced by add_xprt_test() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2815) 	xtest->add_xprt_test(clnt, xprt, xtest->data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2816) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2817) 	xprt_put(xprt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2818) 	xprt_switch_put(xps);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2819) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2820) 	/* so that rpc_clnt_add_xprt does not call rpc_xprt_switch_add_xprt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2821) 	return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2822) out_err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2823) 	xprt_put(xprt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2824) 	xprt_switch_put(xps);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2825) 	pr_info("RPC:   rpc_clnt_test_xprt failed: %d addr %s not added\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2826) 		status, xprt->address_strings[RPC_DISPLAY_ADDR]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2827) 	return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2828) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2829) EXPORT_SYMBOL_GPL(rpc_clnt_setup_test_and_add_xprt);
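
/*
 * Editorial note (not part of the original clnt.c): a hypothetical caller
 * pairs this helper with rpc_clnt_add_xprt() below, packing its own
 * trunking test into the struct rpc_add_xprt_test that is dereferenced
 * above (an add_xprt_test callback plus opaque data):
 */
#if 0	/* illustrative sketch only */
static void example_xprt_tested_ok(struct rpc_clnt *clnt,
				   struct rpc_xprt *xprt, void *data)
{
	/* runs only after the synchronous NULL call above succeeded */
	rpc_clnt_xprt_switch_add_xprt(clnt, xprt);
}

static int example_add_tested_xprt(struct rpc_clnt *clnt,
				   struct xprt_create *xprtargs)
{
	struct rpc_add_xprt_test xtest = {
		.add_xprt_test	= example_xprt_tested_ok,
		.data		= NULL,
	};

	return rpc_clnt_add_xprt(clnt, xprtargs,
				 rpc_clnt_setup_test_and_add_xprt, &xtest);
}
#endif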
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2830) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2831) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2832)  * rpc_clnt_add_xprt - Add a new transport to a rpc_clnt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2833)  * @clnt: pointer to struct rpc_clnt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2834)  * @xprtargs: pointer to struct xprt_create
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2835)  * @setup: callback to test and/or set up the connection
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2836)  * @data: pointer to setup function data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2837)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2838)  * Creates a new transport using the parameters set in args and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2839)  * adds it to clnt.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2840)  * If ping is set, then test that connectivity succeeds before
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2841)  * adding the new transport.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2842)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2843)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2844) int rpc_clnt_add_xprt(struct rpc_clnt *clnt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2845) 		struct xprt_create *xprtargs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2846) 		int (*setup)(struct rpc_clnt *,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2847) 			struct rpc_xprt_switch *,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2848) 			struct rpc_xprt *,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2849) 			void *),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2850) 		void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2851) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2852) 	struct rpc_xprt_switch *xps;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2853) 	struct rpc_xprt *xprt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2854) 	unsigned long connect_timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2855) 	unsigned long reconnect_timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2856) 	unsigned char resvport, reuseport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2857) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2858) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2859) 	rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2860) 	xps = xprt_switch_get(rcu_dereference(clnt->cl_xpi.xpi_xpswitch));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2861) 	xprt = xprt_iter_xprt(&clnt->cl_xpi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2862) 	if (xps == NULL || xprt == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2863) 		rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2864) 		xprt_switch_put(xps);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2865) 		return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2866) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2867) 	resvport = xprt->resvport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2868) 	reuseport = xprt->reuseport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2869) 	connect_timeout = xprt->connect_timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2870) 	reconnect_timeout = xprt->max_reconnect_timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2871) 	rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2872) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2873) 	xprt = xprt_create_transport(xprtargs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2874) 	if (IS_ERR(xprt)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2875) 		ret = PTR_ERR(xprt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2876) 		goto out_put_switch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2877) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2878) 	xprt->resvport = resvport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2879) 	xprt->reuseport = reuseport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2880) 	if (xprt->ops->set_connect_timeout != NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2881) 		xprt->ops->set_connect_timeout(xprt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2882) 				connect_timeout,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2883) 				reconnect_timeout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2884) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2885) 	rpc_xprt_switch_set_roundrobin(xps);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2886) 	if (setup) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2887) 		ret = setup(clnt, xps, xprt, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2888) 		if (ret != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2889) 			goto out_put_xprt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2890) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2891) 	rpc_xprt_switch_add_xprt(xps, xprt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2892) out_put_xprt:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2893) 	xprt_put(xprt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2894) out_put_switch:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2895) 	xprt_switch_put(xps);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2896) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2897) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2898) EXPORT_SYMBOL_GPL(rpc_clnt_add_xprt);
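
/*
 * Editorial note (not part of the original clnt.c): a hypothetical caller
 * adding a second TCP transport to an existing client, with no setup
 * callback so the new transport is added unconditionally.  The
 * xprt_create fields used here (ident, net, dstaddr, addrlen, servername)
 * are the usual ones for this sort of call, not an exhaustive list.
 */
#if 0	/* illustrative sketch only */
static int example_add_second_path(struct rpc_clnt *clnt, struct net *net,
				   struct sockaddr *sap, size_t salen,
				   const char *hostname)
{
	struct xprt_create xprtargs = {
		.ident		= XPRT_TRANSPORT_TCP,
		.net		= net,
		.dstaddr	= sap,
		.addrlen	= salen,
		.servername	= hostname,
	};

	return rpc_clnt_add_xprt(clnt, &xprtargs, NULL, NULL);
}
#endif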
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2899) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2900) struct connect_timeout_data {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2901) 	unsigned long connect_timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2902) 	unsigned long reconnect_timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2903) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2904) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2905) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2906) rpc_xprt_set_connect_timeout(struct rpc_clnt *clnt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2907) 		struct rpc_xprt *xprt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2908) 		void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2909) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2910) 	struct connect_timeout_data *timeo = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2911) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2912) 	if (xprt->ops->set_connect_timeout)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2913) 		xprt->ops->set_connect_timeout(xprt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2914) 				timeo->connect_timeout,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2915) 				timeo->reconnect_timeout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2916) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2917) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2918) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2919) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2920) rpc_set_connect_timeout(struct rpc_clnt *clnt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2921) 		unsigned long connect_timeout,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2922) 		unsigned long reconnect_timeout)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2923) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2924) 	struct connect_timeout_data timeout = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2925) 		.connect_timeout = connect_timeout,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2926) 		.reconnect_timeout = reconnect_timeout,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2927) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2928) 	rpc_clnt_iterate_for_each_xprt(clnt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2929) 			rpc_xprt_set_connect_timeout,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2930) 			&timeout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2931) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2932) EXPORT_SYMBOL_GPL(rpc_set_connect_timeout);
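
/*
 * Editorial note (not part of the original clnt.c): both timeout
 * arguments are unsigned long values in jiffies (matching
 * xprt->connect_timeout and xprt->max_reconnect_timeout above), so a
 * hypothetical caller might write:
 *
 *	rpc_set_connect_timeout(clnt, 10 * HZ, 60 * HZ);
 */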
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2933) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2934) void rpc_clnt_xprt_switch_put(struct rpc_clnt *clnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2935) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2936) 	rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2937) 	xprt_switch_put(rcu_dereference(clnt->cl_xpi.xpi_xpswitch));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2938) 	rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2939) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2940) EXPORT_SYMBOL_GPL(rpc_clnt_xprt_switch_put);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2941) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2942) void rpc_clnt_xprt_switch_add_xprt(struct rpc_clnt *clnt, struct rpc_xprt *xprt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2943) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2944) 	rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2945) 	rpc_xprt_switch_add_xprt(rcu_dereference(clnt->cl_xpi.xpi_xpswitch),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2946) 				 xprt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2947) 	rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2948) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2949) EXPORT_SYMBOL_GPL(rpc_clnt_xprt_switch_add_xprt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2950) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2951) bool rpc_clnt_xprt_switch_has_addr(struct rpc_clnt *clnt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2952) 				   const struct sockaddr *sap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2953) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2954) 	struct rpc_xprt_switch *xps;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2955) 	bool ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2956) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2957) 	rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2958) 	xps = rcu_dereference(clnt->cl_xpi.xpi_xpswitch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2959) 	ret = rpc_xprt_switch_has_addr(xps, sap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2960) 	rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2961) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2962) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2963) EXPORT_SYMBOL_GPL(rpc_clnt_xprt_switch_has_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2964) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2965) #if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2966) static void rpc_show_header(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2967) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2968) 	printk(KERN_INFO "-pid- flgs status -client- --rqstp- "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2969) 		"-timeout ---ops--\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2970) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2971) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2972) static void rpc_show_task(const struct rpc_clnt *clnt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2973) 			  const struct rpc_task *task)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2974) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2975) 	const char *rpc_waitq = "none";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2976) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2977) 	if (RPC_IS_QUEUED(task))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2978) 		rpc_waitq = rpc_qname(task->tk_waitqueue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2979) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2980) 	printk(KERN_INFO "%5u %04x %6d %8p %8p %8ld %8p %sv%u %s a:%ps q:%s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2981) 		task->tk_pid, task->tk_flags, task->tk_status,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2982) 		clnt, task->tk_rqstp, rpc_task_timeout(task), task->tk_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2983) 		clnt->cl_program->name, clnt->cl_vers, rpc_proc_name(task),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2984) 		task->tk_action, rpc_waitq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2985) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2986) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2987) void rpc_show_tasks(struct net *net)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2988) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2989) 	struct rpc_clnt *clnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2990) 	struct rpc_task *task;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2991) 	int header = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2992) 	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2993) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2994) 	spin_lock(&sn->rpc_client_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2995) 	list_for_each_entry(clnt, &sn->all_clients, cl_clients) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2996) 		spin_lock(&clnt->cl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2997) 		list_for_each_entry(task, &clnt->cl_tasks, tk_task) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2998) 			if (!header) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2999) 				rpc_show_header();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3000) 				header++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3001) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3002) 			rpc_show_task(clnt, task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3003) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3004) 		spin_unlock(&clnt->cl_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3005) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3006) 	spin_unlock(&sn->rpc_client_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3007) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3008) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3009) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3010) #if IS_ENABLED(CONFIG_SUNRPC_SWAP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3011) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3012) rpc_clnt_swap_activate_callback(struct rpc_clnt *clnt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3013) 		struct rpc_xprt *xprt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3014) 		void *dummy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3015) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3016) 	return xprt_enable_swap(xprt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3017) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3018) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3019) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3020) rpc_clnt_swap_activate(struct rpc_clnt *clnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3021) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3022) 	if (atomic_inc_return(&clnt->cl_swapper) == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3023) 		return rpc_clnt_iterate_for_each_xprt(clnt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3024) 				rpc_clnt_swap_activate_callback, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3025) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3026) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3027) EXPORT_SYMBOL_GPL(rpc_clnt_swap_activate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3028) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3029) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3030) rpc_clnt_swap_deactivate_callback(struct rpc_clnt *clnt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3031) 		struct rpc_xprt *xprt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3032) 		void *dummy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3033) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3034) 	xprt_disable_swap(xprt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3035) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3036) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3037) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3038) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3039) rpc_clnt_swap_deactivate(struct rpc_clnt *clnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3040) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3041) 	if (atomic_dec_if_positive(&clnt->cl_swapper) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3042) 		rpc_clnt_iterate_for_each_xprt(clnt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3043) 				rpc_clnt_swap_deactivate_callback, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3044) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3045) EXPORT_SYMBOL_GPL(rpc_clnt_swap_deactivate);
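
/*
 * Editorial note (not part of the original clnt.c): the activate/
 * deactivate pair above reference-counts "swapper" users of a client via
 * cl_swapper and asks every transport to enable or disable swap support.
 * It is intended for swap-over-NFS style configurations, where RPC
 * traffic must keep flowing while the system is reclaiming memory.
 */
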
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3046) #endif /* CONFIG_SUNRPC_SWAP */