// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/net/sunrpc/svc_xprt.c
 *
 * Author: Tom Tucker <tom@opengridcomputing.com>
 */

#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <linux/sunrpc/addr.h>
#include <linux/sunrpc/stats.h>
#include <linux/sunrpc/svc_xprt.h>
#include <linux/sunrpc/svcsock.h>
#include <linux/sunrpc/xprt.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <trace/events/sunrpc.h>

#define RPCDBG_FACILITY	RPCDBG_SVCXPRT

static unsigned int svc_rpc_per_connection_limit __read_mostly;
module_param(svc_rpc_per_connection_limit, uint, 0644);

static struct svc_deferred_req *svc_deferred_dequeue(struct svc_xprt *xprt);
static int svc_deferred_recv(struct svc_rqst *rqstp);
static struct cache_deferred_req *svc_defer(struct cache_req *req);
static void svc_age_temp_xprts(struct timer_list *t);
static void svc_delete_xprt(struct svc_xprt *xprt);

/* Apparently the "standard" is that clients close idle connections
 * after 5 minutes, and servers close them after 6 minutes. See:
 * http://nfsv4bat.org/Documents/ConnectAThon/1996/nfstcp.pdf
 */
static int svc_conn_age_period = 6*60;

/* List of registered transport classes */
static DEFINE_SPINLOCK(svc_xprt_class_lock);
static LIST_HEAD(svc_xprt_class_list);

/* SMP locking strategy:
 *
 * svc_pool->sp_lock protects most of the fields of that pool.
 * svc_serv->sv_lock protects sv_tempsocks, sv_permsocks, sv_tmpcnt.
 * When both need to be taken (rare), svc_serv->sv_lock is first.
 * The "service mutex" protects svc_serv->sv_nrthreads.
 * svc_sock->sk_lock protects the svc_sock->sk_deferred list
 * and the ->sk_info_authunix cache.
 *
 * The XPT_BUSY bit in xprt->xpt_flags prevents a transport from being
 * enqueued multiple times. During normal transport processing this bit
 * is set by svc_xprt_enqueue and cleared by svc_xprt_received.
 * Providers should not manipulate this bit directly.
 *
 * Some flags can be set to certain values at any time
 * provided that certain rules are followed:
 *
 * XPT_CONN, XPT_DATA:
 *	- Can be set or cleared at any time.
 *	- After a set, svc_xprt_enqueue must be called to enqueue
 *	  the transport for processing.
 *	- After a clear, the transport must be read/accepted.
 *	  If this succeeds, it must be set again.
 * XPT_CLOSE:
 *	- Can be set at any time. It is never cleared.
 * XPT_DEAD:
 *	- Can only be set while XPT_BUSY is held, which ensures
 *	  that no other thread will be using the transport or will
 *	  try to set XPT_DEAD.
 */
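
/**
 * svc_reg_xprt_class - Register a server-side RPC transport class
 * @xcl: New transport class to be registered
 *
 * Returns zero on success; returns -EEXIST if a transport class
 * with the same name is already registered.
 */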
int svc_reg_xprt_class(struct svc_xprt_class *xcl)
{
	struct svc_xprt_class *cl;
	int res = -EEXIST;

	dprintk("svc: Adding svc transport class '%s'\n", xcl->xcl_name);

	INIT_LIST_HEAD(&xcl->xcl_list);
	spin_lock(&svc_xprt_class_lock);
	/* Make sure there isn't already a class with the same name */
	list_for_each_entry(cl, &svc_xprt_class_list, xcl_list) {
		if (strcmp(xcl->xcl_name, cl->xcl_name) == 0)
			goto out;
	}
	list_add_tail(&xcl->xcl_list, &svc_xprt_class_list);
	res = 0;
out:
	spin_unlock(&svc_xprt_class_lock);
	return res;
}
EXPORT_SYMBOL_GPL(svc_reg_xprt_class);

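/**
 * svc_unreg_xprt_class - Unregister a server-side RPC transport class
 * @xcl: Transport class to be unregistered
 */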
void svc_unreg_xprt_class(struct svc_xprt_class *xcl)
{
	dprintk("svc: Removing svc transport class '%s'\n", xcl->xcl_name);
	spin_lock(&svc_xprt_class_lock);
	list_del_init(&xcl->xcl_list);
	spin_unlock(&svc_xprt_class_lock);
}
EXPORT_SYMBOL_GPL(svc_unreg_xprt_class);

/**
 * svc_print_xprts - Format the transport list for printing
 * @buf: target buffer for formatted address
 * @maxlen: length of target buffer
 *
 * Fills in @buf with a string containing a list of transport names, each name
 * terminated with '\n'. If the buffer is too small, some entries may be
 * missing, but it is guaranteed that all lines in the output buffer are
 * complete.
 *
 * Returns positive length of the filled-in string.
 */
int svc_print_xprts(char *buf, int maxlen)
{
	struct svc_xprt_class *xcl;
	char tmpstr[80];
	int len = 0;

	buf[0] = '\0';

	spin_lock(&svc_xprt_class_lock);
	list_for_each_entry(xcl, &svc_xprt_class_list, xcl_list) {
		int slen;

		slen = snprintf(tmpstr, sizeof(tmpstr), "%s %d\n",
				xcl->xcl_name, xcl->xcl_max_payload);
		if (slen >= sizeof(tmpstr) || len + slen >= maxlen)
			break;
		len += slen;
		strcat(buf, tmpstr);
	}
	spin_unlock(&svc_xprt_class_lock);

	return len;
}

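/*
 * Release callback for xpt_ref. Runs when the last reference to the
 * transport is dropped: releases the auth cache, creds, net namespace
 * and back-channel resources, then asks the provider to free the
 * transport itself.
 */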
static void svc_xprt_free(struct kref *kref)
{
	struct svc_xprt *xprt =
		container_of(kref, struct svc_xprt, xpt_ref);
	struct module *owner = xprt->xpt_class->xcl_owner;

	if (test_bit(XPT_CACHE_AUTH, &xprt->xpt_flags))
		svcauth_unix_info_release(xprt);
	put_cred(xprt->xpt_cred);
	put_net(xprt->xpt_net);
	/* See comment on corresponding get in xs_setup_bc_tcp(): */
	if (xprt->xpt_bc_xprt)
		xprt_put(xprt->xpt_bc_xprt);
	if (xprt->xpt_bc_xps)
		xprt_switch_put(xprt->xpt_bc_xps);
	trace_svc_xprt_free(xprt);
	xprt->xpt_ops->xpo_free(xprt);
	module_put(owner);
}

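/**
 * svc_xprt_put - Release a reference to a transport
 * @xprt: transport instance
 *
 * The transport is freed via svc_xprt_free() when the last
 * reference is dropped.
 */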
void svc_xprt_put(struct svc_xprt *xprt)
{
	kref_put(&xprt->xpt_ref, svc_xprt_free);
}
EXPORT_SYMBOL_GPL(svc_xprt_put);

/*
 * Called by transport drivers to initialize the transport independent
 * portion of the transport instance.
 */
void svc_xprt_init(struct net *net, struct svc_xprt_class *xcl,
		   struct svc_xprt *xprt, struct svc_serv *serv)
{
	memset(xprt, 0, sizeof(*xprt));
	xprt->xpt_class = xcl;
	xprt->xpt_ops = xcl->xcl_ops;
	kref_init(&xprt->xpt_ref);
	xprt->xpt_server = serv;
	INIT_LIST_HEAD(&xprt->xpt_list);
	INIT_LIST_HEAD(&xprt->xpt_ready);
	INIT_LIST_HEAD(&xprt->xpt_deferred);
	INIT_LIST_HEAD(&xprt->xpt_users);
	mutex_init(&xprt->xpt_mutex);
	spin_lock_init(&xprt->xpt_lock);
	set_bit(XPT_BUSY, &xprt->xpt_flags);
	xprt->xpt_net = get_net(net);
	strcpy(xprt->xpt_remotebuf, "uninitialized");
}
EXPORT_SYMBOL_GPL(svc_xprt_init);

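/*
 * Build a wildcard (ANY) listener address for the requested family
 * and port, then ask the transport class to create the endpoint.
 */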
static struct svc_xprt *__svc_xpo_create(struct svc_xprt_class *xcl,
					 struct svc_serv *serv,
					 struct net *net,
					 const int family,
					 const unsigned short port,
					 int flags)
{
	struct sockaddr_in sin = {
		.sin_family		= AF_INET,
		.sin_addr.s_addr	= htonl(INADDR_ANY),
		.sin_port		= htons(port),
	};
#if IS_ENABLED(CONFIG_IPV6)
	struct sockaddr_in6 sin6 = {
		.sin6_family		= AF_INET6,
		.sin6_addr		= IN6ADDR_ANY_INIT,
		.sin6_port		= htons(port),
	};
#endif
	struct svc_xprt *xprt;
	struct sockaddr *sap;
	size_t len;

	switch (family) {
	case PF_INET:
		sap = (struct sockaddr *)&sin;
		len = sizeof(sin);
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case PF_INET6:
		sap = (struct sockaddr *)&sin6;
		len = sizeof(sin6);
		break;
#endif
	default:
		return ERR_PTR(-EAFNOSUPPORT);
	}

	xprt = xcl->xcl_ops->xpo_create(serv, net, sap, len, flags);
	if (IS_ERR(xprt))
		trace_svc_xprt_create_err(serv->sv_program->pg_name,
					  xcl->xcl_name, sap, xprt);
	return xprt;
}

/*
 * svc_xprt_received conditionally queues the transport for processing
 * by another thread. The caller must hold the XPT_BUSY bit and must
 * not thereafter touch transport data.
 *
 * Note: XPT_DATA only gets cleared when a read-attempt finds no (or
 * insufficient) data.
 */
static void svc_xprt_received(struct svc_xprt *xprt)
{
	if (!test_bit(XPT_BUSY, &xprt->xpt_flags)) {
		WARN_ONCE(1, "xprt=0x%p already busy!", xprt);
		return;
	}

	/* As soon as we clear busy, the xprt could be closed and
	 * 'put', so we need a reference to call svo_enqueue_xprt with:
	 */
	svc_xprt_get(xprt);
	smp_mb__before_atomic();
	clear_bit(XPT_BUSY, &xprt->xpt_flags);
	xprt->xpt_server->sv_ops->svo_enqueue_xprt(xprt);
	svc_xprt_put(xprt);
}

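/*
 * Add a permanent (listener) transport to a service and make it
 * available to the pool threads.
 */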
void svc_add_new_perm_xprt(struct svc_serv *serv, struct svc_xprt *new)
{
	clear_bit(XPT_TEMP, &new->xpt_flags);
	spin_lock_bh(&serv->sv_lock);
	list_add(&new->xpt_list, &serv->sv_permsocks);
	spin_unlock_bh(&serv->sv_lock);
	svc_xprt_received(new);
}

static int _svc_create_xprt(struct svc_serv *serv, const char *xprt_name,
			    struct net *net, const int family,
			    const unsigned short port, int flags,
			    const struct cred *cred)
{
	struct svc_xprt_class *xcl;

	spin_lock(&svc_xprt_class_lock);
	list_for_each_entry(xcl, &svc_xprt_class_list, xcl_list) {
		struct svc_xprt *newxprt;
		unsigned short newport;

		if (strcmp(xprt_name, xcl->xcl_name))
			continue;

		if (!try_module_get(xcl->xcl_owner))
			goto err;

		spin_unlock(&svc_xprt_class_lock);
		newxprt = __svc_xpo_create(xcl, serv, net, family, port, flags);
		if (IS_ERR(newxprt)) {
			module_put(xcl->xcl_owner);
			return PTR_ERR(newxprt);
		}
		newxprt->xpt_cred = get_cred(cred);
		svc_add_new_perm_xprt(serv, newxprt);
		newport = svc_xprt_local_port(newxprt);
		return newport;
	}
err:
	spin_unlock(&svc_xprt_class_lock);
	/* This errno is exposed to user space. Provide a reasonable
	 * perror message for a bad transport.
	 */
	return -EPROTONOSUPPORT;
}

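/**
 * svc_create_xprt - Create a new listener for a service
 * @serv: service to which the new listener belongs
 * @xprt_name: transport class name, e.g. "tcp"
 * @net: network namespace for the listener
 * @family: network address family
 * @port: listener port number
 * @flags: transport creation flags
 * @cred: credential to bind to the new transport
 *
 * If the named transport class is not yet registered, try once to
 * load the module that provides it, then retry.
 *
 * Returns the bound local port on success, or a negative errno.
 */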
int svc_create_xprt(struct svc_serv *serv, const char *xprt_name,
		    struct net *net, const int family,
		    const unsigned short port, int flags,
		    const struct cred *cred)
{
	int err;

	err = _svc_create_xprt(serv, xprt_name, net, family, port, flags, cred);
	if (err == -EPROTONOSUPPORT) {
		request_module("svc%s", xprt_name);
		err = _svc_create_xprt(serv, xprt_name, net, family, port, flags, cred);
	}
	return err;
}
EXPORT_SYMBOL_GPL(svc_create_xprt);

/*
 * Copy the local and remote xprt addresses to the rqstp structure
 */
void svc_xprt_copy_addrs(struct svc_rqst *rqstp, struct svc_xprt *xprt)
{
	memcpy(&rqstp->rq_addr, &xprt->xpt_remote, xprt->xpt_remotelen);
	rqstp->rq_addrlen = xprt->xpt_remotelen;

	/*
	 * Destination address in request is needed for binding the
	 * source address in RPC replies/callbacks later.
	 */
	memcpy(&rqstp->rq_daddr, &xprt->xpt_local, xprt->xpt_locallen);
	rqstp->rq_daddrlen = xprt->xpt_locallen;
}
EXPORT_SYMBOL_GPL(svc_xprt_copy_addrs);

/**
 * svc_print_addr - Format rq_addr field for printing
 * @rqstp: svc_rqst struct containing address to print
 * @buf: target buffer for formatted address
 * @len: length of target buffer
 */
char *svc_print_addr(struct svc_rqst *rqstp, char *buf, size_t len)
{
	return __svc_print_addr(svc_addr(rqstp), buf, len);
}
EXPORT_SYMBOL_GPL(svc_print_addr);

static bool svc_xprt_slots_in_range(struct svc_xprt *xprt)
{
	unsigned int limit = svc_rpc_per_connection_limit;
	int nrqsts = atomic_read(&xprt->xpt_nr_rqsts);

	return limit == 0 || (nrqsts >= 0 && nrqsts < limit);
}

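/*
 * Reserve an RPC slot on the transport for this request, honouring
 * the svc_rpc_per_connection_limit cap. The slot is released again
 * by svc_xprt_release_slot() once the request no longer needs the
 * transport's data.
 */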
static bool svc_xprt_reserve_slot(struct svc_rqst *rqstp, struct svc_xprt *xprt)
{
	if (!test_bit(RQ_DATA, &rqstp->rq_flags)) {
		if (!svc_xprt_slots_in_range(xprt))
			return false;
		atomic_inc(&xprt->xpt_nr_rqsts);
		set_bit(RQ_DATA, &rqstp->rq_flags);
	}
	return true;
}

static void svc_xprt_release_slot(struct svc_rqst *rqstp)
{
	struct svc_xprt *xprt = rqstp->rq_xprt;

	if (test_and_clear_bit(RQ_DATA, &rqstp->rq_flags)) {
		atomic_dec(&xprt->xpt_nr_rqsts);
		smp_wmb(); /* See smp_rmb() in svc_xprt_ready() */
		svc_xprt_enqueue(xprt);
	}
}

static bool svc_xprt_ready(struct svc_xprt *xprt)
{
	unsigned long xpt_flags;

	/*
	 * If another cpu has recently updated xpt_flags,
	 * sk_sock->flags, xpt_reserved, or xpt_nr_rqsts, we need to
	 * know about it; otherwise it's possible that both that cpu and
	 * this one could call svc_xprt_enqueue() without either
	 * svc_xprt_enqueue() recognizing that the conditions below
	 * are satisfied, and we could stall indefinitely:
	 */
	smp_rmb();
	xpt_flags = READ_ONCE(xprt->xpt_flags);

	if (xpt_flags & (BIT(XPT_CONN) | BIT(XPT_CLOSE)))
		return true;
	if (xpt_flags & (BIT(XPT_DATA) | BIT(XPT_DEFERRED))) {
		if (xprt->xpt_ops->xpo_has_wspace(xprt) &&
		    svc_xprt_slots_in_range(xprt))
			return true;
		trace_svc_xprt_no_write_space(xprt);
		return false;
	}
	return false;
}

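/*
 * Queue a transport that may have work to do onto the current CPU's
 * pool and wake an idle thread to service it. If every thread in the
 * pool is busy, mark the pool congested and leave the transport on
 * the sp_sockets queue for the next thread that becomes free.
 */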
void svc_xprt_do_enqueue(struct svc_xprt *xprt)
{
	struct svc_pool *pool;
	struct svc_rqst *rqstp = NULL;
	int cpu;

	if (!svc_xprt_ready(xprt))
		return;

	/* Mark transport as busy. It will remain in this state until
	 * the provider calls svc_xprt_received. We update XPT_BUSY
	 * atomically because it also guards against trying to enqueue
	 * the transport twice.
	 */
	if (test_and_set_bit(XPT_BUSY, &xprt->xpt_flags))
		return;

	cpu = get_cpu();
	pool = svc_pool_for_cpu(xprt->xpt_server, cpu);

	atomic_long_inc(&pool->sp_stats.packets);

	spin_lock_bh(&pool->sp_lock);
	list_add_tail(&xprt->xpt_ready, &pool->sp_sockets);
	pool->sp_stats.sockets_queued++;
	spin_unlock_bh(&pool->sp_lock);

	/* find a thread for this xprt */
	rcu_read_lock();
	list_for_each_entry_rcu(rqstp, &pool->sp_all_threads, rq_all) {
		if (test_and_set_bit(RQ_BUSY, &rqstp->rq_flags))
			continue;
		atomic_long_inc(&pool->sp_stats.threads_woken);
		rqstp->rq_qtime = ktime_get();
		wake_up_process(rqstp->rq_task);
		goto out_unlock;
	}
	set_bit(SP_CONGESTED, &pool->sp_flags);
	rqstp = NULL;
out_unlock:
	rcu_read_unlock();
	put_cpu();
	trace_svc_xprt_do_enqueue(xprt, rqstp);
}
EXPORT_SYMBOL_GPL(svc_xprt_do_enqueue);

/*
 * Queue up a transport with data pending. If there are idle nfsd
 * processes, wake 'em up.
 */
void svc_xprt_enqueue(struct svc_xprt *xprt)
{
	if (test_bit(XPT_BUSY, &xprt->xpt_flags))
		return;
	xprt->xpt_server->sv_ops->svo_enqueue_xprt(xprt);
}
EXPORT_SYMBOL_GPL(svc_xprt_enqueue);

/*
 * Dequeue the first transport, if there is one.
 */
static struct svc_xprt *svc_xprt_dequeue(struct svc_pool *pool)
{
	struct svc_xprt *xprt = NULL;

	if (list_empty(&pool->sp_sockets))
		goto out;

	spin_lock_bh(&pool->sp_lock);
	if (likely(!list_empty(&pool->sp_sockets))) {
		xprt = list_first_entry(&pool->sp_sockets,
					struct svc_xprt, xpt_ready);
		list_del_init(&xprt->xpt_ready);
		svc_xprt_get(xprt);
	}
	spin_unlock_bh(&pool->sp_lock);
out:
	return xprt;
}

/**
 * svc_reserve - change the space reserved for the reply to a request.
 * @rqstp: The request in question
 * @space: new max space to reserve
 *
 * Each request reserves some space on the output queue of the transport
 * to make sure the reply fits. This function reduces that reserved
 * space to be the amount of space used already, plus @space.
 */
void svc_reserve(struct svc_rqst *rqstp, int space)
{
	struct svc_xprt *xprt = rqstp->rq_xprt;

	space += rqstp->rq_res.head[0].iov_len;

	if (xprt && space < rqstp->rq_reserved) {
		atomic_sub((rqstp->rq_reserved - space), &xprt->xpt_reserved);
		rqstp->rq_reserved = space;
		smp_wmb(); /* See smp_rmb() in svc_xprt_ready() */
		svc_xprt_enqueue(xprt);
	}
}
EXPORT_SYMBOL_GPL(svc_reserve);

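/*
 * Release the transport-related resources held by a request: the
 * provider's receive state, any deferred request, the response
 * pages, the reply-space reservation and the slot, and finally the
 * reference on the transport itself.
 */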
static void svc_xprt_release(struct svc_rqst *rqstp)
{
	struct svc_xprt *xprt = rqstp->rq_xprt;

	xprt->xpt_ops->xpo_release_rqst(rqstp);

	kfree(rqstp->rq_deferred);
	rqstp->rq_deferred = NULL;

	svc_free_res_pages(rqstp);
	rqstp->rq_res.page_len = 0;
	rqstp->rq_res.page_base = 0;

	/* Reset response buffer and release
	 * the reservation.
	 * But first, check that enough space was reserved
	 * for the reply, otherwise we have a bug!
	 */
	if ((rqstp->rq_res.len) > rqstp->rq_reserved)
		printk(KERN_ERR "RPC request reserved %d but used %d\n",
		       rqstp->rq_reserved,
		       rqstp->rq_res.len);

	rqstp->rq_res.head[0].iov_len = 0;
	svc_reserve(rqstp, 0);
	svc_xprt_release_slot(rqstp);
	rqstp->rq_xprt = NULL;
	svc_xprt_put(xprt);
}

/*
 * Some svc_serv's will have occasional work to do, even when an xprt is not
 * waiting to be serviced. This function is there to "kick" a task in one of
 * those services so that it can wake up and do that work. Note that we only
 * bother with pool 0 as we don't need to wake up more than one thread for
 * this purpose.
 */
void svc_wake_up(struct svc_serv *serv)
{
	struct svc_rqst *rqstp;
	struct svc_pool *pool;

	pool = &serv->sv_pools[0];

	rcu_read_lock();
	list_for_each_entry_rcu(rqstp, &pool->sp_all_threads, rq_all) {
		/* skip any that aren't queued */
		if (test_bit(RQ_BUSY, &rqstp->rq_flags))
			continue;
		rcu_read_unlock();
		wake_up_process(rqstp->rq_task);
		trace_svc_wake_up(rqstp->rq_task->pid);
		return;
	}
	rcu_read_unlock();

	/* No free entries available */
	set_bit(SP_TASK_PENDING, &pool->sp_flags);
	smp_wmb();
	trace_svc_wake_up(0);
}
EXPORT_SYMBOL_GPL(svc_wake_up);

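/*
 * Return non-zero if @sin refers to a privileged (reserved) source
 * port, i.e. one below PROT_SOCK.
 */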
int svc_port_is_privileged(struct sockaddr *sin)
{
	switch (sin->sa_family) {
	case AF_INET:
		return ntohs(((struct sockaddr_in *)sin)->sin_port)
			< PROT_SOCK;
	case AF_INET6:
		return ntohs(((struct sockaddr_in6 *)sin)->sin6_port)
			< PROT_SOCK;
	default:
		return 0;
	}
}

/*
 * Make sure that we don't have too many active connections. If we have,
 * something must be dropped. It's not clear what will happen if we allow
 * "too many" connections, but when dealing with network-facing software,
 * we have to code defensively. Here we do that by imposing hard limits.
 *
 * There's no point in trying to do random drop here for DoS
 * prevention. The NFS client does one reconnect every 15 seconds. An
 * attacker can easily beat that.
 *
 * The only somewhat efficient mechanism would be to drop old
 * connections from the same IP first. But right now we don't even
 * record the client IP in svc_sock.
 *
 * Single-threaded services that expect a lot of clients will probably
 * need to set sv_maxconn to override the default value, which is based
 * on the number of threads.
 */
static void svc_check_conn_limits(struct svc_serv *serv)
{
	unsigned int limit = serv->sv_maxconn ? serv->sv_maxconn :
				(serv->sv_nrthreads+3) * 20;

	if (serv->sv_tmpcnt > limit) {
		struct svc_xprt *xprt = NULL;

		spin_lock_bh(&serv->sv_lock);
		if (!list_empty(&serv->sv_tempsocks)) {
			/* Try to help the admin */
			net_notice_ratelimited("%s: too many open connections, consider increasing the %s\n",
					       serv->sv_name, serv->sv_maxconn ?
					       "max number of connections" :
					       "number of threads");
			/*
			 * Always select the oldest connection. It's not fair,
			 * but so is life
			 */
			xprt = list_entry(serv->sv_tempsocks.prev,
					  struct svc_xprt,
					  xpt_list);
			set_bit(XPT_CLOSE, &xprt->xpt_flags);
			svc_xprt_get(xprt);
		}
		spin_unlock_bh(&serv->sv_lock);

		if (xprt) {
			svc_xprt_enqueue(xprt);
			svc_xprt_put(xprt);
		}
	}
}

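/*
 * Allocate pages for the request's receive buffer and point the
 * rq_arg xdr_buf at them. Sleeps and retries if pages are not
 * immediately available, returning -EINTR if the thread is
 * signalled or asked to stop while waiting.
 */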
static int svc_alloc_arg(struct svc_rqst *rqstp)
{
	struct svc_serv *serv = rqstp->rq_server;
	struct xdr_buf *arg;
	int pages;
	int i;

	/* now allocate needed pages. If we get a failure, sleep briefly */
	pages = (serv->sv_max_mesg + 2 * PAGE_SIZE) >> PAGE_SHIFT;
	if (pages > RPCSVC_MAXPAGES) {
		pr_warn_once("svc: warning: pages=%u > RPCSVC_MAXPAGES=%lu\n",
			     pages, RPCSVC_MAXPAGES);
		/* use as many pages as possible */
		pages = RPCSVC_MAXPAGES;
	}
	for (i = 0; i < pages ; i++)
		while (rqstp->rq_pages[i] == NULL) {
			struct page *p = alloc_page(GFP_KERNEL);

			if (!p) {
				set_current_state(TASK_INTERRUPTIBLE);
				if (signalled() || kthread_should_stop()) {
					set_current_state(TASK_RUNNING);
					return -EINTR;
				}
				schedule_timeout(msecs_to_jiffies(500));
			}
			rqstp->rq_pages[i] = p;
		}
	rqstp->rq_page_end = &rqstp->rq_pages[i];
	rqstp->rq_pages[i++] = NULL; /* this might be seen in nfs_read_actor */

	/* Make arg->head point to first page and arg->pages point to rest */
	arg = &rqstp->rq_arg;
	arg->head[0].iov_base = page_address(rqstp->rq_pages[0]);
	arg->head[0].iov_len = PAGE_SIZE;
	arg->pages = rqstp->rq_pages + 1;
	arg->page_base = 0;
	/* save at least one page for response */
	arg->page_len = (pages-2)*PAGE_SIZE;
	arg->len = (pages-1)*PAGE_SIZE;
	arg->tail[0].iov_len = 0;
	return 0;
}

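/*
 * Decide whether an idle thread may sleep waiting for work: not if
 * a wake-up is pending, a transport is already queued, the thread
 * is being shut down, or the system is freezing.
 */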
static bool
rqst_should_sleep(struct svc_rqst *rqstp)
{
	struct svc_pool *pool = rqstp->rq_pool;

	/* did someone call svc_wake_up? */
	if (test_and_clear_bit(SP_TASK_PENDING, &pool->sp_flags))
		return false;

	/* was a socket queued? */
	if (!list_empty(&pool->sp_sockets))
		return false;

	/* are we shutting down? */
	if (signalled() || kthread_should_stop())
		return false;

	/* are we freezing? */
	if (freezing(current))
		return false;

	return true;
}

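/*
 * Wait for a transport with pending work, for at most @timeout
 * jiffies. Returns the dequeued transport, ERR_PTR(-EINTR) if the
 * thread is being shut down, or ERR_PTR(-EAGAIN) if the wait should
 * simply be retried.
 */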
static struct svc_xprt *svc_get_next_xprt(struct svc_rqst *rqstp, long timeout)
{
	struct svc_pool *pool = rqstp->rq_pool;
	long time_left = 0;

	/* rq_xprt should be clear on entry */
	WARN_ON_ONCE(rqstp->rq_xprt);

	rqstp->rq_xprt = svc_xprt_dequeue(pool);
	if (rqstp->rq_xprt)
		goto out_found;

	/*
	 * We have to be able to interrupt this wait
	 * to bring down the daemons ...
	 */
	set_current_state(TASK_INTERRUPTIBLE);
	smp_mb__before_atomic();
	clear_bit(SP_CONGESTED, &pool->sp_flags);
	clear_bit(RQ_BUSY, &rqstp->rq_flags);
	smp_mb__after_atomic();

	if (likely(rqst_should_sleep(rqstp)))
		time_left = schedule_timeout(timeout);
	else
		__set_current_state(TASK_RUNNING);

	try_to_freeze();

	set_bit(RQ_BUSY, &rqstp->rq_flags);
	smp_mb__after_atomic();
	rqstp->rq_xprt = svc_xprt_dequeue(pool);
	if (rqstp->rq_xprt)
		goto out_found;

	if (!time_left)
		atomic_long_inc(&pool->sp_stats.threads_timedout);

	if (signalled() || kthread_should_stop())
		return ERR_PTR(-EINTR);
	return ERR_PTR(-EAGAIN);
out_found:
	/* Normally we will wait up to 5 seconds for any required
	 * cache information to be provided.
	 */
	if (!test_bit(SP_CONGESTED, &pool->sp_flags))
		rqstp->rq_chandle.thread_wait = 5*HZ;
	else
		rqstp->rq_chandle.thread_wait = 1*HZ;
	trace_svc_xprt_dequeue(rqstp);
	return rqstp->rq_xprt;
}

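/*
 * Add a newly accepted connection to the service's list of temporary
 * transports and arm the aging timer that will eventually close it
 * if it goes idle.
 */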
static void svc_add_new_temp_xprt(struct svc_serv *serv, struct svc_xprt *newxpt)
{
	spin_lock_bh(&serv->sv_lock);
	set_bit(XPT_TEMP, &newxpt->xpt_flags);
	list_add(&newxpt->xpt_list, &serv->sv_tempsocks);
	serv->sv_tmpcnt++;
	if (serv->sv_temptimer.function == NULL) {
		/* setup timer to age temp transports */
		serv->sv_temptimer.function = svc_age_temp_xprts;
		mod_timer(&serv->sv_temptimer,
			  jiffies + svc_conn_age_period * HZ);
	}
	spin_unlock_bh(&serv->sv_lock);
	svc_xprt_received(newxpt);
}

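/*
 * Perform the next unit of work for a transport: delete it if it is
 * closing, accept a new connection if it is a listener, otherwise
 * receive a request from it. Returns the number of bytes received,
 * or zero if no request was ready.
 */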
static int svc_handle_xprt(struct svc_rqst *rqstp, struct svc_xprt *xprt)
{
	struct svc_serv *serv = rqstp->rq_server;
	int len = 0;

	if (test_bit(XPT_CLOSE, &xprt->xpt_flags)) {
		if (test_and_clear_bit(XPT_KILL_TEMP, &xprt->xpt_flags))
			xprt->xpt_ops->xpo_kill_temp_xprt(xprt);
		svc_delete_xprt(xprt);
		/* Leave XPT_BUSY set on the dead xprt: */
		goto out;
	}
	if (test_bit(XPT_LISTENER, &xprt->xpt_flags)) {
		struct svc_xprt *newxpt;

		/*
		 * We know this module_get will succeed because the
		 * listener holds a reference too
		 */
		__module_get(xprt->xpt_class->xcl_owner);
		svc_check_conn_limits(xprt->xpt_server);
		newxpt = xprt->xpt_ops->xpo_accept(xprt);
		if (newxpt) {
			newxpt->xpt_cred = get_cred(xprt->xpt_cred);
			svc_add_new_temp_xprt(serv, newxpt);
			trace_svc_xprt_accept(newxpt, serv->sv_name);
		} else
			module_put(xprt->xpt_class->xcl_owner);
	} else if (svc_xprt_reserve_slot(rqstp, xprt)) {
		/* XPT_DATA|XPT_DEFERRED case: */
		dprintk("svc: server %p, pool %u, transport %p, inuse=%d\n",
			rqstp, rqstp->rq_pool->sp_id, xprt,
			kref_read(&xprt->xpt_ref));
		rqstp->rq_deferred = svc_deferred_dequeue(xprt);
		if (rqstp->rq_deferred)
			len = svc_deferred_recv(rqstp);
		else
			len = xprt->xpt_ops->xpo_recvfrom(rqstp);
		if (len > 0)
			trace_svc_xdr_recvfrom(rqstp, &rqstp->rq_arg);
		rqstp->rq_stime = ktime_get();
		rqstp->rq_reserved = serv->sv_max_mesg;
		atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved);
	}
	/* clear XPT_BUSY: */
	svc_xprt_received(xprt);
out:
	trace_svc_handle_xprt(xprt, len);
	return len;
}

/*
 * Receive the next request on any transport. This code is carefully
 * organised not to touch any cachelines in the shared svc_serv
 * structure, only cachelines in the local svc_pool.
 */
int svc_recv(struct svc_rqst *rqstp, long timeout)
{
	struct svc_xprt *xprt = NULL;
	struct svc_serv *serv = rqstp->rq_server;
	int len, err;

	err = svc_alloc_arg(rqstp);
	if (err)
		goto out;

	try_to_freeze();
	cond_resched();
	err = -EINTR;
	if (signalled() || kthread_should_stop())
		goto out;

	xprt = svc_get_next_xprt(rqstp, timeout);
	if (IS_ERR(xprt)) {
		err = PTR_ERR(xprt);
		goto out;
	}

	len = svc_handle_xprt(rqstp, xprt);

	/* No data, incomplete (TCP) read, or accept() */
	err = -EAGAIN;
	if (len <= 0)
		goto out_release;

	clear_bit(XPT_OLD, &xprt->xpt_flags);

	xprt->xpt_ops->xpo_secure_port(rqstp);
	rqstp->rq_chandle.defer = svc_defer;
	rqstp->rq_xid = svc_getu32(&rqstp->rq_arg.head[0]);

	if (serv->sv_stats)
		serv->sv_stats->netcnt++;
	trace_svc_recv(rqstp, len);
	return len;
out_release:
	rqstp->rq_res.len = 0;
	svc_xprt_release(rqstp);
out:
	return err;
}
EXPORT_SYMBOL_GPL(svc_recv);

/*
 * Drop request
 */
void svc_drop(struct svc_rqst *rqstp)
{
	trace_svc_drop(rqstp);
	svc_xprt_release(rqstp);
}
EXPORT_SYMBOL_GPL(svc_drop);

/*
 * Return reply to client. Returns the number of bytes sent, zero if
 * the connection was lost (-ECONNREFUSED, -ENOTCONN and -EAGAIN from
 * the transport are mapped to zero), or a negative errno.
 */
int svc_send(struct svc_rqst *rqstp)
{
	struct svc_xprt *xprt;
	int len = -EFAULT;
	struct xdr_buf *xb;

	xprt = rqstp->rq_xprt;
	if (!xprt)
		goto out;

	/* calculate over-all length */
	xb = &rqstp->rq_res;
	xb->len = xb->head[0].iov_len +
		  xb->page_len +
		  xb->tail[0].iov_len;
	trace_svc_xdr_sendto(rqstp, xb);
	trace_svc_stats_latency(rqstp);

	len = xprt->xpt_ops->xpo_sendto(rqstp);

	trace_svc_send(rqstp, len);
	svc_xprt_release(rqstp);

	if (len == -ECONNREFUSED || len == -ENOTCONN || len == -EAGAIN)
		len = 0;
out:
	return len;
}

/*
 * Timer function to close old temporary transports, using
 * a mark-and-sweep algorithm.
 */
static void svc_age_temp_xprts(struct timer_list *t)
{
	struct svc_serv *serv = from_timer(serv, t, sv_temptimer);
	struct svc_xprt *xprt;
	struct list_head *le, *next;

	dprintk("svc_age_temp_xprts\n");

	if (!spin_trylock_bh(&serv->sv_lock)) {
		/* busy, try again 1 sec later */
		dprintk("svc_age_temp_xprts: busy\n");
		mod_timer(&serv->sv_temptimer, jiffies + HZ);
		return;
	}

	list_for_each_safe(le, next, &serv->sv_tempsocks) {
		xprt = list_entry(le, struct svc_xprt, xpt_list);

		/* First time through, just mark it OLD. Second time
		 * through, close it.
		 */
		if (!test_and_set_bit(XPT_OLD, &xprt->xpt_flags))
			continue;
		if (kref_read(&xprt->xpt_ref) > 1 ||
		    test_bit(XPT_BUSY, &xprt->xpt_flags))
			continue;
		list_del_init(le);
		set_bit(XPT_CLOSE, &xprt->xpt_flags);
		dprintk("queuing xprt %p for closing\n", xprt);

		/* a thread will dequeue and close it soon */
		svc_xprt_enqueue(xprt);
	}
	spin_unlock_bh(&serv->sv_lock);

	mod_timer(&serv->sv_temptimer, jiffies + svc_conn_age_period * HZ);
}

/* Close temporary transports whose xpt_local matches server_addr immediately
 * instead of waiting for them to be picked up by the timer.
 *
 * This is meant to be called from a notifier_block that runs when an ip
 * address is deleted.
 */
void svc_age_temp_xprts_now(struct svc_serv *serv, struct sockaddr *server_addr)
{
	struct svc_xprt *xprt;
	struct list_head *le, *next;
	LIST_HEAD(to_be_closed);

	spin_lock_bh(&serv->sv_lock);
	list_for_each_safe(le, next, &serv->sv_tempsocks) {
		xprt = list_entry(le, struct svc_xprt, xpt_list);
		if (rpc_cmp_addr(server_addr, (struct sockaddr *)
				 &xprt->xpt_local)) {
			dprintk("svc_age_temp_xprts_now: found %p\n", xprt);
			list_move(le, &to_be_closed);
		}
	}
	spin_unlock_bh(&serv->sv_lock);

	while (!list_empty(&to_be_closed)) {
		le = to_be_closed.next;
		list_del_init(le);
		xprt = list_entry(le, struct svc_xprt, xpt_list);
		set_bit(XPT_CLOSE, &xprt->xpt_flags);
		set_bit(XPT_KILL_TEMP, &xprt->xpt_flags);
		dprintk("svc_age_temp_xprts_now: queuing xprt %p for closing\n",
			xprt);
		svc_xprt_enqueue(xprt);
	}
}
EXPORT_SYMBOL_GPL(svc_age_temp_xprts_now);

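/*
 * Run and unlink every callback registered on @xprt->xpt_users;
 * called once when the transport is being destroyed.
 */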
static void call_xpt_users(struct svc_xprt *xprt)
{
	struct svc_xpt_user *u;

	spin_lock(&xprt->xpt_lock);
	while (!list_empty(&xprt->xpt_users)) {
		u = list_first_entry(&xprt->xpt_users, struct svc_xpt_user, list);
		list_del_init(&u->list);
		u->callback(u);
	}
	spin_unlock(&xprt->xpt_lock);
}

/*
 * Remove a dead transport
 */
static void svc_delete_xprt(struct svc_xprt *xprt)
{
	struct svc_serv *serv = xprt->xpt_server;
	struct svc_deferred_req *dr;

	if (test_and_set_bit(XPT_DEAD, &xprt->xpt_flags))
		return;

	trace_svc_xprt_detach(xprt);
	xprt->xpt_ops->xpo_detach(xprt);
	if (xprt->xpt_bc_xprt)
		xprt->xpt_bc_xprt->ops->close(xprt->xpt_bc_xprt);

	spin_lock_bh(&serv->sv_lock);
	list_del_init(&xprt->xpt_list);
	WARN_ON_ONCE(!list_empty(&xprt->xpt_ready));
	if (test_bit(XPT_TEMP, &xprt->xpt_flags))
		serv->sv_tmpcnt--;
	spin_unlock_bh(&serv->sv_lock);

	while ((dr = svc_deferred_dequeue(xprt)) != NULL)
		kfree(dr);

	call_xpt_users(xprt);
	svc_xprt_put(xprt);
}

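/**
 * svc_close_xprt - mark a transport for close and delete it if possible
 * @xprt: transport to close
 *
 * If no other thread owns XPT_BUSY, the transport is deleted here;
 * otherwise the thread that owns XPT_BUSY will see XPT_CLOSE and
 * perform the deletion.
 */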
void svc_close_xprt(struct svc_xprt *xprt)
{
	trace_svc_xprt_close(xprt);
	set_bit(XPT_CLOSE, &xprt->xpt_flags);
	if (test_and_set_bit(XPT_BUSY, &xprt->xpt_flags))
		/* someone else will have to effect the close */
		return;
	/*
	 * We expect svc_close_xprt() to work even when no threads are
	 * running (e.g., while configuring the server before starting
	 * any threads), so if the transport isn't busy, we delete
	 * it ourselves:
	 */
	svc_delete_xprt(xprt);
}
EXPORT_SYMBOL_GPL(svc_close_xprt);

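/*
 * Mark every transport on @xprt_list that belongs to @net for close
 * and enqueue it for a server thread to pick up. Returns the number
 * of transports marked.
 */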
static int svc_close_list(struct svc_serv *serv, struct list_head *xprt_list, struct net *net)
{
	struct svc_xprt *xprt;
	int ret = 0;

	spin_lock_bh(&serv->sv_lock);
	list_for_each_entry(xprt, xprt_list, xpt_list) {
		if (xprt->xpt_net != net)
			continue;
		ret++;
		set_bit(XPT_CLOSE, &xprt->xpt_flags);
		svc_xprt_enqueue(xprt);
	}
	spin_unlock_bh(&serv->sv_lock);
	return ret;
}

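/*
 * Remove and return the first ready transport belonging to @net from
 * any pool, or NULL if none is queued.
 */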
static struct svc_xprt *svc_dequeue_net(struct svc_serv *serv, struct net *net)
{
	struct svc_pool *pool;
	struct svc_xprt *xprt;
	struct svc_xprt *tmp;
	int i;

	for (i = 0; i < serv->sv_nrpools; i++) {
		pool = &serv->sv_pools[i];

		spin_lock_bh(&pool->sp_lock);
		list_for_each_entry_safe(xprt, tmp, &pool->sp_sockets, xpt_ready) {
			if (xprt->xpt_net != net)
				continue;
			list_del_init(&xprt->xpt_ready);
			spin_unlock_bh(&pool->sp_lock);
			return xprt;
		}
		spin_unlock_bh(&pool->sp_lock);
	}
	return NULL;
}

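/*
 * Dequeue and delete every ready transport that belongs to @net: a
 * minimal stand-in for the server's main event loop, for when no
 * server threads are around to do the close (see the comment below).
 */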
static void svc_clean_up_xprts(struct svc_serv *serv, struct net *net)
{
	struct svc_xprt *xprt;

	while ((xprt = svc_dequeue_net(serv, net))) {
		set_bit(XPT_CLOSE, &xprt->xpt_flags);
		svc_delete_xprt(xprt);
	}
}

/*
 * Server threads may still be running (especially in the case where the
 * service is still running in other network namespaces).
 *
 * So we shut down sockets the same way we would on a running server, by
 * setting XPT_CLOSE, enqueuing, and letting a thread pick it up to do
 * the close. In the case there are no such other threads running,
 * svc_clean_up_xprts() does a simple version of a server's main event
 * loop, and in the case where there are other threads, we may need to
 * wait a little while and then check again to see if they're done.
 */
void svc_close_net(struct svc_serv *serv, struct net *net)
{
	int delay = 0;

	while (svc_close_list(serv, &serv->sv_permsocks, net) +
	       svc_close_list(serv, &serv->sv_tempsocks, net)) {

		svc_clean_up_xprts(serv, net);
		msleep(delay++);
	}
}

/*
 * Handle defer and revisit of requests
 */

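/*
 * Revisit a deferred request: requeue it on its transport and wake a
 * server thread, or drop it if the transport is already dead or the
 * cache says too many requests are deferred (@too_many).
 */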
static void svc_revisit(struct cache_deferred_req *dreq, int too_many)
{
	struct svc_deferred_req *dr =
		container_of(dreq, struct svc_deferred_req, handle);
	struct svc_xprt *xprt = dr->xprt;

	spin_lock(&xprt->xpt_lock);
	set_bit(XPT_DEFERRED, &xprt->xpt_flags);
	if (too_many || test_bit(XPT_DEAD, &xprt->xpt_flags)) {
		spin_unlock(&xprt->xpt_lock);
		trace_svc_defer_drop(dr);
		svc_xprt_put(xprt);
		kfree(dr);
		return;
	}
	dr->xprt = NULL;
	list_add(&dr->handle.recent, &xprt->xpt_deferred);
	spin_unlock(&xprt->xpt_lock);
	trace_svc_defer_queue(dr);
	svc_xprt_enqueue(xprt);
	svc_xprt_put(xprt);
}

/*
 * Save the request off for later processing. The request buffer looks
 * like this:
 *
 * <xprt-header><rpc-header><rpc-pagelist><rpc-tail>
 *
 * This code can only handle requests that consist of an xprt-header
 * and rpc-header.
 */
static struct cache_deferred_req *svc_defer(struct cache_req *req)
{
	struct svc_rqst *rqstp = container_of(req, struct svc_rqst, rq_chandle);
	struct svc_deferred_req *dr;

	if (rqstp->rq_arg.page_len || !test_bit(RQ_USEDEFERRAL, &rqstp->rq_flags))
		return NULL; /* if more than a page, give up FIXME */
	if (rqstp->rq_deferred) {
		dr = rqstp->rq_deferred;
		rqstp->rq_deferred = NULL;
	} else {
		size_t skip;
		size_t size;
		/* FIXME maybe discard if size too large */
		size = sizeof(struct svc_deferred_req) + rqstp->rq_arg.len;
		dr = kmalloc(size, GFP_KERNEL);
		if (dr == NULL)
			return NULL;

		dr->handle.owner = rqstp->rq_server;
		dr->prot = rqstp->rq_prot;
		memcpy(&dr->addr, &rqstp->rq_addr, rqstp->rq_addrlen);
		dr->addrlen = rqstp->rq_addrlen;
		dr->daddr = rqstp->rq_daddr;
		dr->argslen = rqstp->rq_arg.len >> 2;
		dr->xprt_hlen = rqstp->rq_xprt_hlen;

		/* back up head to the start of the buffer and copy */
		skip = rqstp->rq_arg.len - rqstp->rq_arg.head[0].iov_len;
		memcpy(dr->args, rqstp->rq_arg.head[0].iov_base - skip,
		       dr->argslen << 2);
	}
	trace_svc_defer(rqstp);
	svc_xprt_get(rqstp->rq_xprt);
	dr->xprt = rqstp->rq_xprt;
	set_bit(RQ_DROPME, &rqstp->rq_flags);

	dr->handle.revisit = svc_revisit;
	return &dr->handle;
}

/*
 * recv data from a deferred request into an active one
 */
static noinline int svc_deferred_recv(struct svc_rqst *rqstp)
{
	struct svc_deferred_req *dr = rqstp->rq_deferred;

	trace_svc_defer_recv(dr);

	/* setup iov_base past transport header */
	rqstp->rq_arg.head[0].iov_base = dr->args + (dr->xprt_hlen>>2);
	/* The iov_len does not include the transport header bytes */
	rqstp->rq_arg.head[0].iov_len = (dr->argslen<<2) - dr->xprt_hlen;
	rqstp->rq_arg.page_len = 0;
	/* The rq_arg.len includes the transport header bytes */
	rqstp->rq_arg.len = dr->argslen<<2;
	rqstp->rq_prot = dr->prot;
	memcpy(&rqstp->rq_addr, &dr->addr, dr->addrlen);
	rqstp->rq_addrlen = dr->addrlen;
	/* Save off transport header len in case we get deferred again */
	rqstp->rq_xprt_hlen = dr->xprt_hlen;
	rqstp->rq_daddr = dr->daddr;
	rqstp->rq_respages = rqstp->rq_pages;
	return (dr->argslen<<2) - dr->xprt_hlen;
}

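/*
 * Pop the next deferred request queued on @xprt, clearing
 * XPT_DEFERRED once the list is empty. Returns NULL when there is
 * nothing to dequeue.
 */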
static struct svc_deferred_req *svc_deferred_dequeue(struct svc_xprt *xprt)
{
	struct svc_deferred_req *dr = NULL;

	if (!test_bit(XPT_DEFERRED, &xprt->xpt_flags))
		return NULL;
	spin_lock(&xprt->xpt_lock);
	if (!list_empty(&xprt->xpt_deferred)) {
		dr = list_entry(xprt->xpt_deferred.next,
				struct svc_deferred_req,
				handle.recent);
		list_del_init(&dr->handle.recent);
	} else
		clear_bit(XPT_DEFERRED, &xprt->xpt_flags);
	spin_unlock(&xprt->xpt_lock);
	return dr;
}

/**
 * svc_find_xprt - find an RPC transport instance
 * @serv: pointer to svc_serv to search
 * @xcl_name: C string containing transport's class name
 * @net: owner net pointer
 * @af: Address family of transport's local address
 * @port: transport's IP port number
 *
 * Return the transport instance pointer for the endpoint accepting
 * connections/peer traffic from the specified transport class,
 * address family and port.
 *
 * Specifying 0 for the address family or port is effectively a
 * wild-card, and will result in matching the first transport in the
 * service's list that has a matching class name.
 */
struct svc_xprt *svc_find_xprt(struct svc_serv *serv, const char *xcl_name,
			       struct net *net, const sa_family_t af,
			       const unsigned short port)
{
	struct svc_xprt *xprt;
	struct svc_xprt *found = NULL;

	/* Sanity check the args */
	if (serv == NULL || xcl_name == NULL)
		return found;

	spin_lock_bh(&serv->sv_lock);
	list_for_each_entry(xprt, &serv->sv_permsocks, xpt_list) {
		if (xprt->xpt_net != net)
			continue;
		if (strcmp(xprt->xpt_class->xcl_name, xcl_name))
			continue;
		if (af != AF_UNSPEC && af != xprt->xpt_local.ss_family)
			continue;
		if (port != 0 && port != svc_xprt_local_port(xprt))
			continue;
		found = xprt;
		svc_xprt_get(xprt);
		break;
	}
	spin_unlock_bh(&serv->sv_lock);
	return found;
}
EXPORT_SYMBOL_GPL(svc_find_xprt);
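/*
 * Example (hypothetical caller): look up the service's "tcp"
 * endpoint on any port, roughly:
 *
 *	xprt = svc_find_xprt(serv, "tcp", net, AF_UNSPEC, 0);
 *	if (xprt)
 *		svc_xprt_put(xprt);
 *
 * A transport returned by svc_find_xprt() carries an extra reference
 * (taken via svc_xprt_get() above) that the caller must release with
 * svc_xprt_put().
 */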

static int svc_one_xprt_name(const struct svc_xprt *xprt,
			     char *pos, int remaining)
{
	int len;

	len = snprintf(pos, remaining, "%s %u\n",
		       xprt->xpt_class->xcl_name,
		       svc_xprt_local_port(xprt));
	if (len >= remaining)
		return -ENAMETOOLONG;
	return len;
}

/**
 * svc_xprt_names - format a buffer with a list of transport names
 * @serv: pointer to an RPC service
 * @buf: pointer to a buffer to be filled in
 * @buflen: length of buffer to be filled in
 *
 * Fills in @buf with a string containing a list of transport names,
 * each name terminated with '\n'.
 *
 * Returns positive length of the filled-in string on success; otherwise
 * a negative errno value is returned if an error occurs.
 */
int svc_xprt_names(struct svc_serv *serv, char *buf, const int buflen)
{
	struct svc_xprt *xprt;
	int len, totlen;
	char *pos;

	/* Sanity check args */
	if (!serv)
		return 0;

	spin_lock_bh(&serv->sv_lock);

	pos = buf;
	totlen = 0;
	list_for_each_entry(xprt, &serv->sv_permsocks, xpt_list) {
		len = svc_one_xprt_name(xprt, pos, buflen - totlen);
		if (len < 0) {
			*buf = '\0';
			totlen = len;
		}
		if (len <= 0)
			break;

		pos += len;
		totlen += len;
	}

	spin_unlock_bh(&serv->sv_lock);
	return totlen;
}
EXPORT_SYMBOL_GPL(svc_xprt_names);


/*----------------------------------------------------------------------------*/

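/*
 * seq_file iterator for per-pool statistics: *pos == 0 produces the
 * header (SEQ_START_TOKEN), and *pos == n (1-based) produces
 * serv->sv_pools[n - 1].
 */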
static void *svc_pool_stats_start(struct seq_file *m, loff_t *pos)
{
	unsigned int pidx = (unsigned int)*pos;
	struct svc_serv *serv = m->private;

	dprintk("svc_pool_stats_start, *pidx=%u\n", pidx);

	if (!pidx)
		return SEQ_START_TOKEN;
	return (pidx > serv->sv_nrpools ? NULL : &serv->sv_pools[pidx-1]);
}

static void *svc_pool_stats_next(struct seq_file *m, void *p, loff_t *pos)
{
	struct svc_pool *pool = p;
	struct svc_serv *serv = m->private;

	dprintk("svc_pool_stats_next, *pos=%llu\n", *pos);

	if (p == SEQ_START_TOKEN) {
		pool = &serv->sv_pools[0];
	} else {
		unsigned int pidx = (pool - &serv->sv_pools[0]);
		if (pidx < serv->sv_nrpools-1)
			pool = &serv->sv_pools[pidx+1];
		else
			pool = NULL;
	}
	++*pos;
	return pool;
}

static void svc_pool_stats_stop(struct seq_file *m, void *p)
{
}

static int svc_pool_stats_show(struct seq_file *m, void *p)
{
	struct svc_pool *pool = p;

	if (p == SEQ_START_TOKEN) {
		seq_puts(m, "# pool packets-arrived sockets-enqueued threads-woken threads-timedout\n");
		return 0;
	}

	seq_printf(m, "%u %lu %lu %lu %lu\n",
		pool->sp_id,
		(unsigned long)atomic_long_read(&pool->sp_stats.packets),
		pool->sp_stats.sockets_queued,
		(unsigned long)atomic_long_read(&pool->sp_stats.threads_woken),
		(unsigned long)atomic_long_read(&pool->sp_stats.threads_timedout));

	return 0;
}

static const struct seq_operations svc_pool_stats_seq_ops = {
	.start	= svc_pool_stats_start,
	.next	= svc_pool_stats_next,
	.stop	= svc_pool_stats_stop,
	.show	= svc_pool_stats_show,
};

int svc_pool_stats_open(struct svc_serv *serv, struct file *file)
{
	int err;

	err = seq_open(file, &svc_pool_stats_seq_ops);
	if (!err)
		((struct seq_file *) file->private_data)->private = serv;
	return err;
}
EXPORT_SYMBOL(svc_pool_stats_open);
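
/*
 * Example (hypothetical caller): a service can expose these stats
 * through a procfs file by calling svc_pool_stats_open() from its
 * open method and pairing it with the standard seq_file helpers
 * (seq_read, seq_lseek, seq_release), roughly:
 *
 *	static int example_pool_stats_open(struct inode *inode,
 *					   struct file *file)
 *	{
 *		return svc_pool_stats_open(example_serv, file);
 *	}
 */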

/*----------------------------------------------------------------------------*/