// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/net/sunrpc/svc.c
 *
 * High-level RPC service routines
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 *
 * Multiple thread pools and NUMAisation
 * Copyright (c) 2006 Silicon Graphics, Inc.
 * by Greg Banks <gnb@melbourne.sgi.com>
 */

#include <linux/linkage.h>
#include <linux/sched/signal.h>
#include <linux/errno.h>
#include <linux/net.h>
#include <linux/in.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/slab.h>

#include <linux/sunrpc/types.h>
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/stats.h>
#include <linux/sunrpc/svcsock.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/bc_xprt.h>

#include <trace/events/sunrpc.h>

#define RPCDBG_FACILITY	RPCDBG_SVCDSP

static void svc_unregister(const struct svc_serv *serv, struct net *net);

#define svc_serv_is_pooled(serv)	((serv)->sv_ops->svo_function)

#define SVC_POOL_DEFAULT	SVC_POOL_GLOBAL

/*
 * Structure for mapping cpus to pools and vice versa.
 * Setup once during sunrpc initialisation.
 */
struct svc_pool_map svc_pool_map = {
	.mode = SVC_POOL_DEFAULT
};
EXPORT_SYMBOL_GPL(svc_pool_map);

static DEFINE_MUTEX(svc_pool_map_mutex); /* protects svc_pool_map.count only */

static int
param_set_pool_mode(const char *val, const struct kernel_param *kp)
{
	int *ip = (int *)kp->arg;
	struct svc_pool_map *m = &svc_pool_map;
	int err;

	mutex_lock(&svc_pool_map_mutex);

	err = -EBUSY;
	if (m->count)
		goto out;

	err = 0;
	if (!strncmp(val, "auto", 4))
		*ip = SVC_POOL_AUTO;
	else if (!strncmp(val, "global", 6))
		*ip = SVC_POOL_GLOBAL;
	else if (!strncmp(val, "percpu", 6))
		*ip = SVC_POOL_PERCPU;
	else if (!strncmp(val, "pernode", 7))
		*ip = SVC_POOL_PERNODE;
	else
		err = -EINVAL;

out:
	mutex_unlock(&svc_pool_map_mutex);
	return err;
}

static int
param_get_pool_mode(char *buf, const struct kernel_param *kp)
{
	int *ip = (int *)kp->arg;

	switch (*ip) {
	case SVC_POOL_AUTO:
		return strlcpy(buf, "auto\n", 20);
	case SVC_POOL_GLOBAL:
		return strlcpy(buf, "global\n", 20);
	case SVC_POOL_PERCPU:
		return strlcpy(buf, "percpu\n", 20);
	case SVC_POOL_PERNODE:
		return strlcpy(buf, "pernode\n", 20);
	default:
		return sprintf(buf, "%d\n", *ip);
	}
}

module_param_call(pool_mode, param_set_pool_mode, param_get_pool_mode,
		  &svc_pool_map.mode, 0644);
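
/*
 * Usage sketch (illustrative, not part of the upstream file): because
 * param_set_pool_mode() returns -EBUSY once svc_pool_map.count is
 * non-zero, the mode has to be chosen before any pooled service (e.g.
 * nfsd) takes its first reference:
 *
 *	modprobe sunrpc pool_mode=pernode
 *	echo auto > /sys/module/sunrpc/parameters/pool_mode
 *
 * The sysfs path follows from the module_param_call() above and its
 * 0644 permissions.
 */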

/*
 * Detect best pool mapping mode heuristically,
 * according to the machine's topology.
 */
static int
svc_pool_map_choose_mode(void)
{
	unsigned int node;

	if (nr_online_nodes > 1) {
		/*
		 * Actually have multiple NUMA nodes,
		 * so split pools on NUMA node boundaries
		 */
		return SVC_POOL_PERNODE;
	}

	node = first_online_node;
	if (nr_cpus_node(node) > 2) {
		/*
		 * Non-trivial SMP, or CONFIG_NUMA on
		 * non-NUMA hardware, e.g. with a generic
		 * x86_64 kernel on Xeons.  In this case we
		 * want to divide the pools on cpu boundaries.
		 */
		return SVC_POOL_PERCPU;
	}

	/* default: one global pool */
	return SVC_POOL_GLOBAL;
}

/*
 * Allocate the to_pool[] and pool_to[] arrays.
 * Returns 0 on success or an errno.
 */
static int
svc_pool_map_alloc_arrays(struct svc_pool_map *m, unsigned int maxpools)
{
	m->to_pool = kcalloc(maxpools, sizeof(unsigned int), GFP_KERNEL);
	if (!m->to_pool)
		goto fail;
	m->pool_to = kcalloc(maxpools, sizeof(unsigned int), GFP_KERNEL);
	if (!m->pool_to)
		goto fail_free;

	return 0;

fail_free:
	kfree(m->to_pool);
	m->to_pool = NULL;
fail:
	return -ENOMEM;
}

/*
 * Initialise the pool map for SVC_POOL_PERCPU mode.
 * Returns number of pools or <0 on error.
 */
static int
svc_pool_map_init_percpu(struct svc_pool_map *m)
{
	unsigned int maxpools = nr_cpu_ids;
	unsigned int pidx = 0;
	unsigned int cpu;
	int err;

	err = svc_pool_map_alloc_arrays(m, maxpools);
	if (err)
		return err;

	for_each_online_cpu(cpu) {
		BUG_ON(pidx >= maxpools);
		m->to_pool[cpu] = pidx;
		m->pool_to[pidx] = cpu;
		pidx++;
	}
	/* cpus brought online later all get mapped to pool0, sorry */

	return pidx;
}


/*
 * Initialise the pool map for SVC_POOL_PERNODE mode.
 * Returns number of pools or <0 on error.
 */
static int
svc_pool_map_init_pernode(struct svc_pool_map *m)
{
	unsigned int maxpools = nr_node_ids;
	unsigned int pidx = 0;
	unsigned int node;
	int err;

	err = svc_pool_map_alloc_arrays(m, maxpools);
	if (err)
		return err;

	for_each_node_with_cpus(node) {
		/* some architectures (e.g. SN2) have cpuless nodes */
		BUG_ON(pidx >= maxpools);
		m->to_pool[node] = pidx;
		m->pool_to[pidx] = node;
		pidx++;
	}
	/* nodes brought online later all get mapped to pool0, sorry */

	return pidx;
}


/*
 * Add a reference to the global map of cpus to pools (and
 * vice versa).  Initialise the map if we're the first user.
 * Returns the number of pools.
 */
unsigned int
svc_pool_map_get(void)
{
	struct svc_pool_map *m = &svc_pool_map;
	int npools = -1;

	mutex_lock(&svc_pool_map_mutex);

	if (m->count++) {
		mutex_unlock(&svc_pool_map_mutex);
		return m->npools;
	}

	if (m->mode == SVC_POOL_AUTO)
		m->mode = svc_pool_map_choose_mode();

	switch (m->mode) {
	case SVC_POOL_PERCPU:
		npools = svc_pool_map_init_percpu(m);
		break;
	case SVC_POOL_PERNODE:
		npools = svc_pool_map_init_pernode(m);
		break;
	}

	if (npools < 0) {
		/* default, or memory allocation failure */
		npools = 1;
		m->mode = SVC_POOL_GLOBAL;
	}
	m->npools = npools;

	mutex_unlock(&svc_pool_map_mutex);
	return m->npools;
}
EXPORT_SYMBOL_GPL(svc_pool_map_get);
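
/*
 * Expected calling pattern (a sketch, mirroring what
 * svc_create_pooled() and svc_destroy() below actually do):
 *
 *	unsigned int npools = svc_pool_map_get();
 *	serv = __svc_create(prog, bufsize, npools, ops);
 *	...
 *	svc_pool_map_put();	(on failure, or on final destroy)
 *
 * Each successful get must be balanced by exactly one put, or the
 * arrays above are never freed and the pool mode can no longer be
 * changed (param_set_pool_mode() keeps returning -EBUSY).
 */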

/*
 * Drop a reference to the global map of cpus to pools.
 * When the last reference is dropped, the map data is
 * freed; this allows the sysadmin to change the pool
 * mode using the pool_mode module option without
 * rebooting or re-loading sunrpc.ko.
 */
void
svc_pool_map_put(void)
{
	struct svc_pool_map *m = &svc_pool_map;

	mutex_lock(&svc_pool_map_mutex);

	if (!--m->count) {
		kfree(m->to_pool);
		m->to_pool = NULL;
		kfree(m->pool_to);
		m->pool_to = NULL;
		m->npools = 0;
	}

	mutex_unlock(&svc_pool_map_mutex);
}
EXPORT_SYMBOL_GPL(svc_pool_map_put);

static int svc_pool_map_get_node(unsigned int pidx)
{
	const struct svc_pool_map *m = &svc_pool_map;

	if (m->count) {
		if (m->mode == SVC_POOL_PERCPU)
			return cpu_to_node(m->pool_to[pidx]);
		if (m->mode == SVC_POOL_PERNODE)
			return m->pool_to[pidx];
	}
	return NUMA_NO_NODE;
}

/*
 * Set the given thread's cpus_allowed mask so that it
 * will only run on cpus in the given pool.
 */
static inline void
svc_pool_map_set_cpumask(struct task_struct *task, unsigned int pidx)
{
	struct svc_pool_map *m = &svc_pool_map;
	unsigned int node = m->pool_to[pidx];

	/*
	 * The caller checks for sv_nrpools > 1, which
	 * implies that we've been initialized.
	 */
	WARN_ON_ONCE(m->count == 0);
	if (m->count == 0)
		return;

	switch (m->mode) {
	case SVC_POOL_PERCPU:
		set_cpus_allowed_ptr(task, cpumask_of(node));
		break;
	case SVC_POOL_PERNODE:
		set_cpus_allowed_ptr(task, cpumask_of_node(node));
		break;
	}
}

/*
 * Use the mapping mode to choose a pool for a given CPU.
 * Used when enqueueing an incoming RPC.  Always returns
 * a non-NULL pool pointer.
 */
struct svc_pool *
svc_pool_for_cpu(struct svc_serv *serv, int cpu)
{
	struct svc_pool_map *m = &svc_pool_map;
	unsigned int pidx = 0;

	/*
	 * An uninitialised map happens in a pure client when
	 * lockd is brought up, so silently treat it the
	 * same as SVC_POOL_GLOBAL.
	 */
	if (svc_serv_is_pooled(serv)) {
		switch (m->mode) {
		case SVC_POOL_PERCPU:
			pidx = m->to_pool[cpu];
			break;
		case SVC_POOL_PERNODE:
			pidx = m->to_pool[cpu_to_node(cpu)];
			break;
		}
	}
	return &serv->sv_pools[pidx % serv->sv_nrpools];
}
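
/*
 * Illustrative enqueue-path usage (a sketch; the real caller lives in
 * svc_xprt.c and may differ in detail):
 *
 *	cpu = get_cpu();
 *	pool = svc_pool_for_cpu(xprt->xpt_server, cpu);
 *	put_cpu();
 *
 * The modulo in the return statement above guarantees a valid pool
 * pointer even if the map and the serv disagree about the number of
 * pools.
 */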

int svc_rpcb_setup(struct svc_serv *serv, struct net *net)
{
	int err;

	err = rpcb_create_local(net);
	if (err)
		return err;

	/* Remove any stale portmap registrations */
	svc_unregister(serv, net);
	return 0;
}
EXPORT_SYMBOL_GPL(svc_rpcb_setup);

void svc_rpcb_cleanup(struct svc_serv *serv, struct net *net)
{
	svc_unregister(serv, net);
	rpcb_put_local(net);
}
EXPORT_SYMBOL_GPL(svc_rpcb_cleanup);

static int svc_uses_rpcbind(struct svc_serv *serv)
{
	struct svc_program *progp;
	unsigned int i;

	for (progp = serv->sv_program; progp; progp = progp->pg_next) {
		for (i = 0; i < progp->pg_nvers; i++) {
			if (progp->pg_vers[i] == NULL)
				continue;
			if (!progp->pg_vers[i]->vs_hidden)
				return 1;
		}
	}

	return 0;
}

int svc_bind(struct svc_serv *serv, struct net *net)
{
	if (!svc_uses_rpcbind(serv))
		return 0;
	return svc_rpcb_setup(serv, net);
}
EXPORT_SYMBOL_GPL(svc_bind);

#if defined(CONFIG_SUNRPC_BACKCHANNEL)
static void
__svc_init_bc(struct svc_serv *serv)
{
	INIT_LIST_HEAD(&serv->sv_cb_list);
	spin_lock_init(&serv->sv_cb_lock);
	init_waitqueue_head(&serv->sv_cb_waitq);
}
#else
static void
__svc_init_bc(struct svc_serv *serv)
{
}
#endif

/*
 * Create an RPC service
 */
static struct svc_serv *
__svc_create(struct svc_program *prog, unsigned int bufsize, int npools,
	     const struct svc_serv_ops *ops)
{
	struct svc_serv *serv;
	unsigned int vers;
	unsigned int xdrsize;
	unsigned int i;

	serv = kzalloc(sizeof(*serv), GFP_KERNEL);
	if (!serv)
		return NULL;
	serv->sv_name = prog->pg_name;
	serv->sv_program = prog;
	serv->sv_nrthreads = 1;
	serv->sv_stats = prog->pg_stats;
	if (bufsize > RPCSVC_MAXPAYLOAD)
		bufsize = RPCSVC_MAXPAYLOAD;
	serv->sv_max_payload = bufsize ? bufsize : 4096;
	serv->sv_max_mesg = roundup(serv->sv_max_payload + PAGE_SIZE, PAGE_SIZE);
	serv->sv_ops = ops;
	xdrsize = 0;
	while (prog) {
		prog->pg_lovers = prog->pg_nvers - 1;
		for (vers = 0; vers < prog->pg_nvers; vers++)
			if (prog->pg_vers[vers]) {
				prog->pg_hivers = vers;
				if (prog->pg_lovers > vers)
					prog->pg_lovers = vers;
				if (prog->pg_vers[vers]->vs_xdrsize > xdrsize)
					xdrsize = prog->pg_vers[vers]->vs_xdrsize;
			}
		prog = prog->pg_next;
	}
	serv->sv_xdrsize = xdrsize;
	INIT_LIST_HEAD(&serv->sv_tempsocks);
	INIT_LIST_HEAD(&serv->sv_permsocks);
	timer_setup(&serv->sv_temptimer, NULL, 0);
	spin_lock_init(&serv->sv_lock);

	__svc_init_bc(serv);

	serv->sv_nrpools = npools;
	serv->sv_pools =
		kcalloc(serv->sv_nrpools, sizeof(struct svc_pool),
			GFP_KERNEL);
	if (!serv->sv_pools) {
		kfree(serv);
		return NULL;
	}

	for (i = 0; i < serv->sv_nrpools; i++) {
		struct svc_pool *pool = &serv->sv_pools[i];

		dprintk("svc: initialising pool %u for %s\n",
			i, serv->sv_name);

		pool->sp_id = i;
		INIT_LIST_HEAD(&pool->sp_sockets);
		INIT_LIST_HEAD(&pool->sp_all_threads);
		spin_lock_init(&pool->sp_lock);
	}

	return serv;
}

struct svc_serv *
svc_create(struct svc_program *prog, unsigned int bufsize,
	   const struct svc_serv_ops *ops)
{
	return __svc_create(prog, bufsize, /*npools*/1, ops);
}
EXPORT_SYMBOL_GPL(svc_create);
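
/*
 * Hypothetical caller (names invented for illustration): a service
 * registers its program table and ops, then binds its transports:
 *
 *	serv = svc_create(&my_program, 64 * 1024, &my_serv_ops);
 *	if (!serv)
 *		return -ENOMEM;
 *	error = svc_bind(serv, net);
 *
 * bufsize is clamped to RPCSVC_MAXPAYLOAD and defaults to 4096 when
 * zero, per __svc_create() above.
 */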

struct svc_serv *
svc_create_pooled(struct svc_program *prog, unsigned int bufsize,
		  const struct svc_serv_ops *ops)
{
	struct svc_serv *serv;
	unsigned int npools = svc_pool_map_get();

	serv = __svc_create(prog, bufsize, npools, ops);
	if (!serv)
		goto out_err;
	return serv;
out_err:
	svc_pool_map_put();
	return NULL;
}
EXPORT_SYMBOL_GPL(svc_create_pooled);

void svc_shutdown_net(struct svc_serv *serv, struct net *net)
{
	svc_close_net(serv, net);

	if (serv->sv_ops->svo_shutdown)
		serv->sv_ops->svo_shutdown(serv, net);
}
EXPORT_SYMBOL_GPL(svc_shutdown_net);

/*
 * Destroy an RPC service.  Should be called with appropriate locking to
 * protect sv_nrthreads, sv_permsocks and sv_tempsocks.
 */
void
svc_destroy(struct svc_serv *serv)
{
	dprintk("svc: svc_destroy(%s, %d)\n",
		serv->sv_program->pg_name,
		serv->sv_nrthreads);

	if (serv->sv_nrthreads) {
		if (--(serv->sv_nrthreads) != 0) {
			svc_sock_update_bufs(serv);
			return;
		}
	} else
		printk("svc_destroy: no threads for serv=%p!\n", serv);

	del_timer_sync(&serv->sv_temptimer);

	/*
	 * The last user is gone and thus all sockets must have been
	 * destroyed by this point.  The BUG_ONs below check this.
	 */
	BUG_ON(!list_empty(&serv->sv_permsocks));
	BUG_ON(!list_empty(&serv->sv_tempsocks));

	cache_clean_deferred(serv);

	if (svc_serv_is_pooled(serv))
		svc_pool_map_put();

	kfree(serv->sv_pools);
	kfree(serv);
}
EXPORT_SYMBOL_GPL(svc_destroy);

/*
 * Allocate an RPC server's buffer space.
 * We allocate pages and place them in rq_argpages.
 */
static int
svc_init_buffer(struct svc_rqst *rqstp, unsigned int size, int node)
{
	unsigned int pages, arghi;

	/* bc_xprt uses fore channel allocated buffers */
	if (svc_is_backchannel(rqstp))
		return 1;

	pages = size / PAGE_SIZE + 1; /* extra page as we hold both request
				       * and reply.  We assume each is at
				       * most one page.
				       */
	arghi = 0;
	WARN_ON_ONCE(pages > RPCSVC_MAXPAGES);
	if (pages > RPCSVC_MAXPAGES)
		pages = RPCSVC_MAXPAGES;
	while (pages) {
		struct page *p = alloc_pages_node(node, GFP_KERNEL, 0);
		if (!p)
			break;
		rqstp->rq_pages[arghi++] = p;
		pages--;
	}
	return pages == 0;
}
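
/*
 * Worked example (assuming 4 KB pages): with a 1 MB sv_max_payload,
 * sv_max_mesg = 1 MB + 4 KB, so size / PAGE_SIZE + 1 gives
 * 257 + 1 = 258 pages pinned per thread; the WARN_ON_ONCE above only
 * fires if that count ever exceeds RPCSVC_MAXPAGES.
 */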

/*
 * Release an RPC server buffer
 */
static void
svc_release_buffer(struct svc_rqst *rqstp)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(rqstp->rq_pages); i++)
		if (rqstp->rq_pages[i])
			put_page(rqstp->rq_pages[i]);
}

struct svc_rqst *
svc_rqst_alloc(struct svc_serv *serv, struct svc_pool *pool, int node)
{
	struct svc_rqst *rqstp;

	rqstp = kzalloc_node(sizeof(*rqstp), GFP_KERNEL, node);
	if (!rqstp)
		return rqstp;

	__set_bit(RQ_BUSY, &rqstp->rq_flags);
	spin_lock_init(&rqstp->rq_lock);
	rqstp->rq_server = serv;
	rqstp->rq_pool = pool;

	rqstp->rq_argp = kmalloc_node(serv->sv_xdrsize, GFP_KERNEL, node);
	if (!rqstp->rq_argp)
		goto out_enomem;

	rqstp->rq_resp = kmalloc_node(serv->sv_xdrsize, GFP_KERNEL, node);
	if (!rqstp->rq_resp)
		goto out_enomem;

	if (!svc_init_buffer(rqstp, serv->sv_max_mesg, node))
		goto out_enomem;

	return rqstp;
out_enomem:
	svc_rqst_free(rqstp);
	return NULL;
}
EXPORT_SYMBOL_GPL(svc_rqst_alloc);

struct svc_rqst *
svc_prepare_thread(struct svc_serv *serv, struct svc_pool *pool, int node)
{
	struct svc_rqst *rqstp;

	rqstp = svc_rqst_alloc(serv, pool, node);
	if (!rqstp)
		return ERR_PTR(-ENOMEM);

	serv->sv_nrthreads++;
	spin_lock_bh(&pool->sp_lock);
	pool->sp_nrthreads++;
	list_add_rcu(&rqstp->rq_all, &pool->sp_all_threads);
	spin_unlock_bh(&pool->sp_lock);
	return rqstp;
}
EXPORT_SYMBOL_GPL(svc_prepare_thread);

/*
 * Choose a pool in which to create a new thread, for svc_set_num_threads
 */
static inline struct svc_pool *
choose_pool(struct svc_serv *serv, struct svc_pool *pool, unsigned int *state)
{
	if (pool != NULL)
		return pool;

	return &serv->sv_pools[(*state)++ % serv->sv_nrpools];
}

/*
 * Choose a thread to kill, for svc_set_num_threads
 */
static inline struct task_struct *
choose_victim(struct svc_serv *serv, struct svc_pool *pool, unsigned int *state)
{
	unsigned int i;
	struct task_struct *task = NULL;

	if (pool != NULL) {
		spin_lock_bh(&pool->sp_lock);
	} else {
		/* choose a pool in round-robin fashion */
		for (i = 0; i < serv->sv_nrpools; i++) {
			pool = &serv->sv_pools[--(*state) % serv->sv_nrpools];
			spin_lock_bh(&pool->sp_lock);
			if (!list_empty(&pool->sp_all_threads))
				goto found_pool;
			spin_unlock_bh(&pool->sp_lock);
		}
		return NULL;
	}

found_pool:
	if (!list_empty(&pool->sp_all_threads)) {
		struct svc_rqst *rqstp;

		/*
		 * Remove from the pool->sp_all_threads list
		 * so we don't try to kill it again.
		 */
		rqstp = list_entry(pool->sp_all_threads.next, struct svc_rqst, rq_all);
		set_bit(RQ_VICTIM, &rqstp->rq_flags);
		list_del_rcu(&rqstp->rq_all);
		task = rqstp->rq_task;
	}
	spin_unlock_bh(&pool->sp_lock);

	return task;
}

/* create new threads */
static int
svc_start_kthreads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
{
	struct svc_rqst *rqstp;
	struct task_struct *task;
	struct svc_pool *chosen_pool;
	unsigned int state = serv->sv_nrthreads - 1;
	int node;

	do {
		nrservs--;
		chosen_pool = choose_pool(serv, pool, &state);

		node = svc_pool_map_get_node(chosen_pool->sp_id);
		rqstp = svc_prepare_thread(serv, chosen_pool, node);
		if (IS_ERR(rqstp))
			return PTR_ERR(rqstp);

		__module_get(serv->sv_ops->svo_module);
		task = kthread_create_on_node(serv->sv_ops->svo_function, rqstp,
					      node, "%s", serv->sv_name);
		if (IS_ERR(task)) {
			module_put(serv->sv_ops->svo_module);
			svc_exit_thread(rqstp);
			return PTR_ERR(task);
		}

		rqstp->rq_task = task;
		if (serv->sv_nrpools > 1)
			svc_pool_map_set_cpumask(task, chosen_pool->sp_id);

		svc_sock_update_bufs(serv);
		wake_up_process(task);
	} while (nrservs > 0);

	return 0;
}


/* destroy old threads */
static int
svc_signal_kthreads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
{
	struct task_struct *task;
	unsigned int state = serv->sv_nrthreads - 1;

	/* destroy old threads */
	do {
		task = choose_victim(serv, pool, &state);
		if (task == NULL)
			break;
		send_sig(SIGINT, task, 1);
		nrservs++;
	} while (nrservs < 0);

	return 0;
}

/*
 * Create or destroy threads as needed to bring the number of threads
 * to the given value.  If `pool' is non-NULL, applies only to threads
 * in that pool, otherwise round-robins between all pools.  Caller must
 * ensure mutual exclusion between this and server startup or shutdown.
 *
 * Destroying threads relies on the service threads filling in
 * rqstp->rq_task, which only the nfs ones do.  Assumes the serv
 * has been created using svc_create_pooled().
 *
 * Based on code that used to be in nfsd_svc() but tweaked
 * to be pool-aware.
 */
int
svc_set_num_threads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
{
	if (pool == NULL) {
		/* The -1 assumes caller has done a svc_get() */
		nrservs -= (serv->sv_nrthreads - 1);
	} else {
		spin_lock_bh(&pool->sp_lock);
		nrservs -= pool->sp_nrthreads;
		spin_unlock_bh(&pool->sp_lock);
	}

	if (nrservs > 0)
		return svc_start_kthreads(serv, pool, nrservs);
	if (nrservs < 0)
		return svc_signal_kthreads(serv, pool, nrservs);
	return 0;
}
EXPORT_SYMBOL_GPL(svc_set_num_threads);
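
/*
 * Typical usage (a sketch; nfsd's thread-count handler does roughly
 * this, under the nfsd_mutex for the required exclusion):
 *
 *	svc_get(serv);
 *	error = svc_set_num_threads(serv, NULL, nthreads);
 *	svc_destroy(serv);	(drops the reference svc_get() took)
 *
 * A positive delta spawns kthreads; a negative one signals victims
 * with SIGINT and lets them exit on their own.
 */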
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) /* destroy old threads */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) svc_stop_kthreads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) struct task_struct *task;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) unsigned int state = serv->sv_nrthreads-1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) /* destroy old threads */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) task = choose_victim(serv, pool, &state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) if (task == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) kthread_stop(task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) nrservs++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) } while (nrservs < 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) svc_set_num_threads_sync(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) if (pool == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) /* The -1 assumes caller has done a svc_get() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) nrservs -= (serv->sv_nrthreads-1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) spin_lock_bh(&pool->sp_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) nrservs -= pool->sp_nrthreads;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) spin_unlock_bh(&pool->sp_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) if (nrservs > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) return svc_start_kthreads(serv, pool, nrservs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) if (nrservs < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) return svc_stop_kthreads(serv, pool, nrservs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) EXPORT_SYMBOL_GPL(svc_set_num_threads_sync);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) * Called from a server thread as it's exiting. Caller must hold the "service
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) * mutex" for the service.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) svc_rqst_free(struct svc_rqst *rqstp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) svc_release_buffer(rqstp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) kfree(rqstp->rq_resp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) kfree(rqstp->rq_argp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) kfree(rqstp->rq_auth_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) kfree_rcu(rqstp, rq_rcu_head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) EXPORT_SYMBOL_GPL(svc_rqst_free);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851)
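/*
 * Called from a server thread as it's exiting. Caller must hold the
 * "service mutex" for the service. Removes the thread from its pool
 * (unless choose_victim() already marked it RQ_VICTIM) and drops the
 * thread's reference on the serv via svc_destroy().
 */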
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) svc_exit_thread(struct svc_rqst *rqstp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) struct svc_serv *serv = rqstp->rq_server;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) struct svc_pool *pool = rqstp->rq_pool;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) spin_lock_bh(&pool->sp_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) pool->sp_nrthreads--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) if (!test_and_set_bit(RQ_VICTIM, &rqstp->rq_flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) list_del_rcu(&rqstp->rq_all);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) spin_unlock_bh(&pool->sp_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) svc_rqst_free(rqstp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) /* Release the server */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) if (serv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) svc_destroy(serv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) EXPORT_SYMBOL_GPL(svc_exit_thread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) * Register an "inet" protocol family netid with the local
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) * rpcbind daemon via an rpcbind v4 SET request.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) * No netconfig infrastructure is available in the kernel, so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) * we map IP_ protocol numbers to netids by hand.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) * Returns zero on success; a negative errno value is returned
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) * if any error occurs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) static int __svc_rpcb_register4(struct net *net, const u32 program,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) const u32 version,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) const unsigned short protocol,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) const unsigned short port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) const struct sockaddr_in sin = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) .sin_family = AF_INET,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) .sin_addr.s_addr = htonl(INADDR_ANY),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) .sin_port = htons(port),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) const char *netid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) switch (protocol) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) case IPPROTO_UDP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) netid = RPCBIND_NETID_UDP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) case IPPROTO_TCP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) netid = RPCBIND_NETID_TCP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) return -ENOPROTOOPT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) error = rpcb_v4_register(net, program, version,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) (const struct sockaddr *)&sin, netid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) * User space didn't support rpcbind v4, so retry this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) * registration request with the legacy rpcbind v2 protocol.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) if (error == -EPROTONOSUPPORT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) error = rpcb_register(net, program, version, protocol, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) }
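
/*
 * For example (values hypothetical, not from this file): registering
 * NFS (program 100003) version 3 on UDP port 2049 first attempts an
 * rpcbind v4 SET with netid "udp" and the wildcard address; if the
 * local rpcbind speaks only version 2, the same mapping is registered
 * through the legacy PMAP_SET path via rpcb_register():
 *
 *	error = __svc_rpcb_register4(net, 100003, 3, IPPROTO_UDP, 2049);
 */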
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) #if IS_ENABLED(CONFIG_IPV6)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) * Register an "inet6" protocol family netid with the local
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) * rpcbind daemon via an rpcbind v4 SET request.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) * No netconfig infrastructure is available in the kernel, so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) * we map IP_ protocol numbers to netids by hand.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) * Returns zero on success; a negative errno value is returned
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) * if any error occurs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) static int __svc_rpcb_register6(struct net *net, const u32 program,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) const u32 version,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) const unsigned short protocol,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) const unsigned short port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) const struct sockaddr_in6 sin6 = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) .sin6_family = AF_INET6,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) .sin6_addr = IN6ADDR_ANY_INIT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) .sin6_port = htons(port),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) const char *netid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) switch (protocol) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) case IPPROTO_UDP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) netid = RPCBIND_NETID_UDP6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) case IPPROTO_TCP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) netid = RPCBIND_NETID_TCP6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) return -ENOPROTOOPT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) error = rpcb_v4_register(net, program, version,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) (const struct sockaddr *)&sin6, netid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) * User space didn't support rpcbind version 4, so we won't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) * use a PF_INET6 listener.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) if (error == -EPROTONOSUPPORT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) error = -EAFNOSUPPORT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) #endif /* IS_ENABLED(CONFIG_IPV6) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) * Register a kernel RPC service via rpcbind version 4.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) * Returns zero on success; a negative errno value is returned
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) * if any error occurs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) static int __svc_register(struct net *net, const char *progname,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) const u32 program, const u32 version,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) const int family,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) const unsigned short protocol,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) const unsigned short port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) int error = -EAFNOSUPPORT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) switch (family) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) case PF_INET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) error = __svc_rpcb_register4(net, program, version,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) protocol, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) #if IS_ENABLED(CONFIG_IPV6)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) case PF_INET6:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) error = __svc_rpcb_register6(net, program, version,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) protocol, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) trace_svc_register(progname, version, protocol, port, family, error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) int svc_rpcbind_set_version(struct net *net,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) const struct svc_program *progp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) u32 version, int family,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) unsigned short proto,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) unsigned short port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) return __svc_register(net, progp->pg_name, progp->pg_prog,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) version, family, proto, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) EXPORT_SYMBOL_GPL(svc_rpcbind_set_version);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) int svc_generic_rpcbind_set(struct net *net,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) const struct svc_program *progp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) u32 version, int family,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) unsigned short proto,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) unsigned short port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) const struct svc_version *vers = progp->pg_vers[version];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) if (vers == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) if (vers->vs_hidden) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) trace_svc_noregister(progp->pg_name, version, proto,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) port, family, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) * Don't register a UDP port if we need congestion
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) * control.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) if (vers->vs_need_cong_ctrl && proto == IPPROTO_UDP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) error = svc_rpcbind_set_version(net, progp, version,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) family, proto, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) return (vers->vs_rpcb_optnl) ? 0 : error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) EXPORT_SYMBOL_GPL(svc_generic_rpcbind_set);
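
/*
 * Illustrative sketch of the struct svc_version flags consulted above
 * (hypothetical version, other fields omitted):
 */
#if 0
static const struct svc_version example_version = {
	.vs_hidden	   = false,	/* do advertise to rpcbind */
	.vs_need_cong_ctrl = true,	/* refuse a UDP registration */
	.vs_rpcb_optnl	   = true,	/* rpcbind SET failures are not fatal */
};
#endif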
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) * svc_register - register an RPC service with the local portmapper
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) * @serv: svc_serv struct for the service to register
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) * @net: net namespace for the service to register
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) * @family: protocol family of service's listener socket
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) * @proto: transport protocol number to advertise
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) * @port: port to advertise
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) * Service is registered for any address in the passed-in protocol family
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) int svc_register(const struct svc_serv *serv, struct net *net,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) const int family, const unsigned short proto,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) const unsigned short port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) struct svc_program *progp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) int error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) WARN_ON_ONCE(proto == 0 && port == 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) if (proto == 0 && port == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) for (progp = serv->sv_program; progp; progp = progp->pg_next) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) for (i = 0; i < progp->pg_nvers; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) error = progp->pg_rpcbind_set(net, progp, i,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) family, proto, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) if (error < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) printk(KERN_WARNING "svc: failed to register "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) "%sv%u RPC service (errno %d).\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) progp->pg_name, i, -error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) }
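
/*
 * Note: a failure above stops registration of the remaining versions
 * of that program only; later programs in the list are still
 * attempted, earlier successful registrations are not rolled back
 * (svc_unregister() cleans those up), and the returned status
 * reflects the most recent attempt.
 */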
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) * If user space is running rpcbind, it should take the v4 UNSET
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) * and clear everything for this [program, version]. If user space
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) * is running portmap, it will reject the v4 UNSET, but won't have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) * any "inet6" entries anyway. So a PMAP_UNSET should be sufficient
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) * in this case to clear all existing entries for [program, version].
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) static void __svc_unregister(struct net *net, const u32 program, const u32 version,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) const char *progname)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) error = rpcb_v4_register(net, program, version, NULL, "");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) * User space didn't support rpcbind v4, so retry this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) * request with the legacy rpcbind v2 protocol.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) if (error == -EPROTONOSUPPORT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) error = rpcb_register(net, program, version, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) trace_svc_unregister(progname, version, error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) * All netids, bind addresses and ports registered for [program, version]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) * are removed from the local rpcbind database (if the service is not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) * hidden) to make way for a new instance of the service.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) * The result of each unregistration is reported via the svc_unregister
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) * tracepoint for those who want verification, but is otherwise not important.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) static void svc_unregister(const struct svc_serv *serv, struct net *net)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) struct svc_program *progp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118)
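/*
 * A pending signal (e.g. when the service's threads are being killed)
 * would make the synchronous rpcbind calls below fail immediately with
 * -EINTR, so clear TIF_SIGPENDING for the duration; recalc_sigpending()
 * below restores the pending state.
 */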
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) clear_thread_flag(TIF_SIGPENDING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) for (progp = serv->sv_program; progp; progp = progp->pg_next) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) for (i = 0; i < progp->pg_nvers; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) if (progp->pg_vers[i] == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) if (progp->pg_vers[i]->vs_hidden)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) __svc_unregister(net, progp->pg_prog, i, progp->pg_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) spin_lock_irqsave(&current->sighand->siglock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) recalc_sigpending();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) spin_unlock_irqrestore(&current->sighand->siglock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) * dprintk the given error with the address of the client that caused it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) #if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) static __printf(2, 3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) void svc_printk(struct svc_rqst *rqstp, const char *fmt, ...)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) struct va_format vaf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) va_list args;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) char buf[RPC_MAX_ADDRBUFLEN];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) va_start(args, fmt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) vaf.fmt = fmt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) vaf.va = &args;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) dprintk("svc: %s: %pV", svc_print_addr(rqstp, buf, sizeof(buf)), &vaf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) va_end(args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) static __printf(2, 3) void svc_printk(struct svc_rqst *rqstp, const char *fmt, ...) {}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) __be32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) svc_return_autherr(struct svc_rqst *rqstp, __be32 auth_err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) set_bit(RQ_AUTHERR, &rqstp->rq_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) return auth_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) EXPORT_SYMBOL_GPL(svc_return_autherr);
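
/*
 * A procedure handler can fail a request at the RPC authentication
 * level by returning through svc_return_autherr(); for example
 * (illustrative):
 *
 *	return svc_return_autherr(rqstp, rpc_autherr_badcred);
 *
 * svc_get_autherr() below retrieves the saved status during generic
 * dispatch.
 */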
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) static __be32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) svc_get_autherr(struct svc_rqst *rqstp, __be32 *statp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) if (test_and_clear_bit(RQ_AUTHERR, &rqstp->rq_flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) return *statp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) return rpc_auth_ok;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175)
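/*
 * Generic dispatch: decode the arguments, invoke the procedure, and
 * encode the reply. Returns 0 if the reply should be dropped, 1 if it
 * should be sent, with *statp carrying the RPC accept status.
 */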
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) svc_generic_dispatch(struct svc_rqst *rqstp, __be32 *statp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) struct kvec *argv = &rqstp->rq_arg.head[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) struct kvec *resv = &rqstp->rq_res.head[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) const struct svc_procedure *procp = rqstp->rq_procinfo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) * Decode arguments. A decode failure is reported back to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) * client as GARBAGE_ARGS via *statp.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) if (procp->pc_decode &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) !procp->pc_decode(rqstp, argv->iov_base)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) *statp = rpc_garbage_args;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) *statp = procp->pc_func(rqstp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) if (*statp == rpc_drop_reply ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) test_bit(RQ_DROPME, &rqstp->rq_flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) if (test_bit(RQ_AUTHERR, &rqstp->rq_flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) if (*statp != rpc_success)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) /* Encode reply */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) if (procp->pc_encode &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) !procp->pc_encode(rqstp, resv->iov_base + resv->iov_len)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) dprintk("svc: failed to encode reply\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) /* serv->sv_stats->rpcsystemerr++; */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) *statp = rpc_system_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) __be32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) svc_generic_init_request(struct svc_rqst *rqstp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) const struct svc_program *progp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) struct svc_process_info *ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) const struct svc_version *versp = NULL; /* compiler food */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) const struct svc_procedure *procp = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) if (rqstp->rq_vers >= progp->pg_nvers)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) goto err_bad_vers;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) versp = progp->pg_vers[rqstp->rq_vers];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) if (!versp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) goto err_bad_vers;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) * Some protocol versions (namely NFSv4) require some form of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) * congestion control. (See RFC 7530 section 3.1 paragraph 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) * In other words, UDP is not allowed. We mark those when setting
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) * up the svc_xprt, and verify that here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) * The spec is not very clear about what error should be returned
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) * when someone tries to access a server that is listening on UDP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) * for lower versions. RPC_PROG_MISMATCH seems to be the closest
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) * fit.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) if (versp->vs_need_cong_ctrl && rqstp->rq_xprt &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) !test_bit(XPT_CONG_CTRL, &rqstp->rq_xprt->xpt_flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) goto err_bad_vers;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) if (rqstp->rq_proc >= versp->vs_nproc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) goto err_bad_proc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) rqstp->rq_procinfo = procp = &versp->vs_proc[rqstp->rq_proc];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) if (!procp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) goto err_bad_proc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) /* Initialize storage for argp and resp */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) memset(rqstp->rq_argp, 0, procp->pc_argsize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) memset(rqstp->rq_resp, 0, procp->pc_ressize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) /* Bump per-procedure stats counter */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) versp->vs_count[rqstp->rq_proc]++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) ret->dispatch = versp->vs_dispatch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) return rpc_success;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) err_bad_vers:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) ret->mismatch.lovers = progp->pg_lovers;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) ret->mismatch.hivers = progp->pg_hivers;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) return rpc_prog_mismatch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) err_bad_proc:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) return rpc_proc_unavail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) EXPORT_SYMBOL_GPL(svc_generic_init_request);
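
/*
 * For example, an NFSv4-style version sets .vs_need_cong_ctrl in its
 * struct svc_version, while connection-oriented transports (e.g. TCP)
 * are expected to set XPT_CONG_CTRL on their svc_xprt; only that
 * combination passes the check above when congestion control is
 * required.
 */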
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) * Common routine for processing the RPC request.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) */
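
/*
 * Layout of the reply header assembled below (see RFC 5531):
 *
 *	xid
 *	mtype		REPLY (1)
 *	reply_stat	MSG_ACCEPTED (0), or MSG_DENIED (1) on the
 *			reject paths
 *	verifier	added by svc_authenticate()
 *	accept_stat	RPC_SUCCESS (0), or an error status
 *	results		encoded by the procedure
 */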
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) struct svc_program *progp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) const struct svc_procedure *procp = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) struct svc_serv *serv = rqstp->rq_server;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) struct svc_process_info process;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) __be32 *statp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) u32 prog, vers;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) __be32 auth_stat, rpc_stat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) int auth_res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) __be32 *reply_statp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) rpc_stat = rpc_success;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) if (argv->iov_len < 6*4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) goto err_short_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) /* Will be turned off by GSS integrity and privacy services */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) set_bit(RQ_SPLICE_OK, &rqstp->rq_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) /* Will be turned off only when NFSv4 Sessions are used */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) set_bit(RQ_USEDEFERRAL, &rqstp->rq_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) clear_bit(RQ_DROPME, &rqstp->rq_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) svc_putu32(resv, rqstp->rq_xid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) vers = svc_getnl(argv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) /* First words of reply: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) svc_putnl(resv, 1); /* REPLY */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) if (vers != 2) /* RPC version number */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) goto err_bad_rpc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) /* Save position in case we later decide to reject: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) reply_statp = resv->iov_base + resv->iov_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) svc_putnl(resv, 0); /* ACCEPT */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) rqstp->rq_prog = prog = svc_getnl(argv); /* program number */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) rqstp->rq_vers = svc_getnl(argv); /* version number */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) rqstp->rq_proc = svc_getnl(argv); /* procedure number */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) for (progp = serv->sv_program; progp; progp = progp->pg_next)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) if (prog == progp->pg_prog)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) * Decode auth data, and add verifier to reply buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) * We do this before anything else in order to get a decent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) * auth verifier.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) auth_res = svc_authenticate(rqstp, &auth_stat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) /* Also give the program a chance to reject this call: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) if (auth_res == SVC_OK && progp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) auth_stat = rpc_autherr_badcred;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) auth_res = progp->pg_authenticate(rqstp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) if (auth_res != SVC_OK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) trace_svc_authenticate(rqstp, auth_res, auth_stat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) switch (auth_res) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) case SVC_OK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) case SVC_GARBAGE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) goto err_garbage;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) case SVC_SYSERR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) rpc_stat = rpc_system_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) goto err_bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) case SVC_DENIED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) goto err_bad_auth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) case SVC_CLOSE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) goto close;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) case SVC_DROP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) goto dropit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) case SVC_COMPLETE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) goto sendit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) if (progp == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) goto err_bad_prog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) rpc_stat = progp->pg_init_request(rqstp, progp, &process);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) switch (rpc_stat) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) case rpc_success:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) case rpc_prog_unavail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) goto err_bad_prog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) case rpc_prog_mismatch:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) goto err_bad_vers;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) case rpc_proc_unavail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) goto err_bad_proc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) procp = rqstp->rq_procinfo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) /* Should this check go into the dispatcher? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) if (!procp || !procp->pc_func)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) goto err_bad_proc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) /* Syntactic check complete */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) serv->sv_stats->rpccnt++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) trace_svc_process(rqstp, progp->pg_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) /* Build the reply header. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) statp = resv->iov_base + resv->iov_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) svc_putnl(resv, RPC_SUCCESS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) /* un-reserve some of the out-queue now that we have a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) * better idea of reply size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) if (procp->pc_xdrressize)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) svc_reserve_auth(rqstp, procp->pc_xdrressize<<2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) /* Call the function that processes the request. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) if (!process.dispatch) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) if (!svc_generic_dispatch(rqstp, statp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) goto release_dropit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) if (*statp == rpc_garbage_args)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) goto err_garbage;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) auth_stat = svc_get_autherr(rqstp, statp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) if (auth_stat != rpc_auth_ok)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) goto err_release_bad_auth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) dprintk("svc: calling dispatcher\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) if (!process.dispatch(rqstp, statp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) goto release_dropit; /* Release reply info */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) /* Check RPC status result */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) if (*statp != rpc_success)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) resv->iov_len = ((void *)statp) - resv->iov_base + 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) /* Release reply info */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) if (procp->pc_release)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) procp->pc_release(rqstp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) if (procp->pc_encode == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) goto dropit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) sendit:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) if (svc_authorise(rqstp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) goto close_xprt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) return 1; /* Caller can now send it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) release_dropit:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) if (procp->pc_release)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) procp->pc_release(rqstp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) dropit:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) svc_authorise(rqstp); /* doesn't hurt to call this twice */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) dprintk("svc: svc_process dropit\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) close:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) svc_authorise(rqstp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) close_xprt:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) if (rqstp->rq_xprt && test_bit(XPT_TEMP, &rqstp->rq_xprt->xpt_flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) svc_close_xprt(rqstp->rq_xprt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) dprintk("svc: svc_process close\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) err_short_len:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) svc_printk(rqstp, "short len %zd, dropping request\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) argv->iov_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) goto close_xprt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) err_bad_rpc:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) serv->sv_stats->rpcbadfmt++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) svc_putnl(resv, 1); /* REJECT */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) svc_putnl(resv, 0); /* RPC_MISMATCH */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) svc_putnl(resv, 2); /* lowest supported version: only RPCv2 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) svc_putnl(resv, 2); /* highest supported version */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) goto sendit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) err_release_bad_auth:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) if (procp->pc_release)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) procp->pc_release(rqstp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) err_bad_auth:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) dprintk("svc: authentication failed (%d)\n", ntohl(auth_stat));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) serv->sv_stats->rpcbadauth++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) /* Restore write pointer to location of accept status: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) xdr_ressize_check(rqstp, reply_statp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) svc_putnl(resv, 1); /* REJECT */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) svc_putnl(resv, 1); /* AUTH_ERROR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) svc_putnl(resv, ntohl(auth_stat)); /* status */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) goto sendit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) err_bad_prog:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) dprintk("svc: unknown program %d\n", prog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) serv->sv_stats->rpcbadfmt++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) svc_putnl(resv, RPC_PROG_UNAVAIL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) goto sendit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) err_bad_vers:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) svc_printk(rqstp, "unknown version (%d for prog %d, %s)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) rqstp->rq_vers, rqstp->rq_prog, progp->pg_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) serv->sv_stats->rpcbadfmt++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) svc_putnl(resv, RPC_PROG_MISMATCH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) svc_putnl(resv, process.mismatch.lovers);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) svc_putnl(resv, process.mismatch.hivers);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) goto sendit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) err_bad_proc:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) svc_printk(rqstp, "unknown procedure (%d)\n", rqstp->rq_proc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) serv->sv_stats->rpcbadfmt++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) svc_putnl(resv, RPC_PROC_UNAVAIL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) goto sendit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) err_garbage:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) svc_printk(rqstp, "failed to decode args\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) rpc_stat = rpc_garbage_args;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) err_bad:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) serv->sv_stats->rpcbadfmt++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) svc_putnl(resv, ntohl(rpc_stat));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) goto sendit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) * Process the RPC request.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) svc_process(struct svc_rqst *rqstp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) struct kvec *argv = &rqstp->rq_arg.head[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) struct kvec *resv = &rqstp->rq_res.head[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) struct svc_serv *serv = rqstp->rq_server;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) u32 dir;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) * Setup response xdr_buf.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) * Initially it has just one page
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) rqstp->rq_next_page = &rqstp->rq_respages[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) resv->iov_base = page_address(rqstp->rq_respages[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) resv->iov_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) rqstp->rq_res.pages = rqstp->rq_respages + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) rqstp->rq_res.len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) rqstp->rq_res.page_base = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) rqstp->rq_res.page_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) rqstp->rq_res.buflen = PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) rqstp->rq_res.tail[0].iov_base = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) rqstp->rq_res.tail[0].iov_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) dir = svc_getnl(argv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) if (dir != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) /* direction != CALL */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) svc_printk(rqstp, "bad direction %d, dropping request\n", dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) serv->sv_stats->rpcbadfmt++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) goto out_drop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) /* Returns 1 for send, 0 for drop */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) if (likely(svc_process_common(rqstp, argv, resv)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) return svc_send(rqstp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) out_drop:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) svc_drop(rqstp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) EXPORT_SYMBOL_GPL(svc_process);
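
/*
 * Illustrative worker loop (hypothetical; real services add their own
 * setup, signal handling and accounting):
 */
#if 0
static int example_worker(void *data)
{
	struct svc_rqst *rqstp = data;

	while (!kthread_should_stop()) {
		int err = svc_recv(rqstp, 60 * 60 * HZ);

		if (err == -EINTR || err == -EAGAIN)
			continue;
		if (err < 0)
			break;
		svc_process(rqstp);	/* sends the reply, or drops it */
	}
	svc_exit_thread(rqstp);
	return 0;
}
#endif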
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) #if defined(CONFIG_SUNRPC_BACKCHANNEL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) * Process a backchannel RPC request that arrived over an existing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) * outbound connection
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) bc_svc_process(struct svc_serv *serv, struct rpc_rqst *req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) struct svc_rqst *rqstp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) struct kvec *argv = &rqstp->rq_arg.head[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) struct kvec *resv = &rqstp->rq_res.head[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) struct rpc_task *task;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) int proc_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) dprintk("svc: %s(%p)\n", __func__, req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) /* Build the svc_rqst used by the common processing routine */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) rqstp->rq_xid = req->rq_xid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) rqstp->rq_prot = req->rq_xprt->prot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) rqstp->rq_server = serv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) rqstp->rq_bc_net = req->rq_xprt->xprt_net;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) rqstp->rq_addrlen = sizeof(req->rq_xprt->addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) memcpy(&rqstp->rq_addr, &req->rq_xprt->addr, rqstp->rq_addrlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) memcpy(&rqstp->rq_arg, &req->rq_rcv_buf, sizeof(rqstp->rq_arg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) memcpy(&rqstp->rq_res, &req->rq_snd_buf, sizeof(rqstp->rq_res));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) /* Adjust the argument buffer length */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) rqstp->rq_arg.len = req->rq_private_buf.len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) if (rqstp->rq_arg.len <= rqstp->rq_arg.head[0].iov_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) rqstp->rq_arg.head[0].iov_len = rqstp->rq_arg.len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) rqstp->rq_arg.page_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) } else if (rqstp->rq_arg.len <= rqstp->rq_arg.head[0].iov_len +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) rqstp->rq_arg.page_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) rqstp->rq_arg.page_len = rqstp->rq_arg.len -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) rqstp->rq_arg.head[0].iov_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) rqstp->rq_arg.len = rqstp->rq_arg.head[0].iov_len +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) rqstp->rq_arg.page_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) /* reset result send buffer "put" position */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) resv->iov_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) * Skip the next two words because they've already been
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) * processed in the transport
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) svc_getu32(argv); /* XID */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) svc_getnl(argv); /* CALLDIR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) /* Parse and execute the bc call */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) proc_error = svc_process_common(rqstp, argv, resv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) atomic_dec(&req->rq_xprt->bc_slot_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) if (!proc_error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) /* Processing error: drop the request */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) xprt_free_bc_request(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) error = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) /* Finally, send the reply synchronously */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) memcpy(&req->rq_snd_buf, &rqstp->rq_res, sizeof(req->rq_snd_buf));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) task = rpc_run_bc_task(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) if (IS_ERR(task)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) error = PTR_ERR(task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) WARN_ON_ONCE(atomic_read(&task->tk_count) != 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) error = task->tk_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) rpc_put_task(task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) dprintk("svc: %s(), error=%d\n", __func__, error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) EXPORT_SYMBOL_GPL(bc_svc_process);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) #endif /* CONFIG_SUNRPC_BACKCHANNEL */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) * Return (transport-specific) limit on the rpc payload.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) u32 svc_max_payload(const struct svc_rqst *rqstp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) u32 max = rqstp->rq_xprt->xpt_class->xcl_max_payload;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) if (rqstp->rq_server->sv_max_payload < max)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) max = rqstp->rq_server->sv_max_payload;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) return max;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) EXPORT_SYMBOL_GPL(svc_max_payload);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) * svc_encode_read_payload - mark a range of bytes as a READ payload
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) * @rqstp: svc_rqst to operate on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) * @offset: payload's byte offset in rqstp->rq_res
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) * @length: size of payload, in bytes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) * Returns zero on success, or a negative errno if a permanent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) * error occurred.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) int svc_encode_read_payload(struct svc_rqst *rqstp, unsigned int offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) unsigned int length)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) return rqstp->rq_xprt->xpt_ops->xpo_read_payload(rqstp, offset, length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) EXPORT_SYMBOL_GPL(svc_encode_read_payload);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641)
/**
 * svc_fill_write_vector - Construct data argument for VFS write call
 * @rqstp: svc_rqst to operate on
 * @pages: list of pages containing data payload
 * @first: buffer containing first section of write payload
 * @total: total number of bytes of write payload
 *
 * Fills in rqstp::rq_vec, and returns the number of elements.
 */
unsigned int svc_fill_write_vector(struct svc_rqst *rqstp, struct page **pages,
				   struct kvec *first, size_t total)
{
	struct kvec *vec = rqstp->rq_vec;
	unsigned int i;

	/* Some types of transport can present the write payload
	 * entirely in rq_arg.pages. In this case, @first is empty.
	 */
	i = 0;
	if (first->iov_len) {
		vec[i].iov_base = first->iov_base;
		vec[i].iov_len = min_t(size_t, total, first->iov_len);
		total -= vec[i].iov_len;
		++i;
	}

	while (total) {
		vec[i].iov_base = page_address(*pages);
		vec[i].iov_len = min_t(size_t, total, PAGE_SIZE);
		total -= vec[i].iov_len;
		++i;
		++pages;
	}

	/* Sanity check: the payload must not have overrun rq_vec. */
	WARN_ON_ONCE(i > ARRAY_SIZE(rqstp->rq_vec));
	return i;
}
EXPORT_SYMBOL_GPL(svc_fill_write_vector);

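#if 0	/* Illustrative sketch only -- not compiled */
/*
 * A hypothetical WRITE path: build rq_vec from the request's payload,
 * then wrap it in an iov_iter for the VFS. This mirrors how NFSD
 * consumes the returned vector count, but example_do_write() and its
 * argument list are invented, and all file/locking plumbing is omitted.
 */
static ssize_t example_do_write(struct svc_rqst *rqstp, struct file *file,
				loff_t pos, struct kvec *first, size_t cnt)
{
	unsigned int nvecs;
	struct iov_iter iter;

	nvecs = svc_fill_write_vector(rqstp, rqstp->rq_arg.pages, first, cnt);
	iov_iter_kvec(&iter, WRITE, rqstp->rq_vec, nvecs, cnt);
	return vfs_iter_write(file, &iter, &pos, 0);
}
#endif
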
/**
 * svc_fill_symlink_pathname - Construct pathname argument for VFS symlink call
 * @rqstp: svc_rqst to operate on
 * @first: buffer containing first section of pathname
 * @p: buffer containing remaining section of pathname
 * @total: total length of the pathname argument
 *
 * The VFS symlink API demands a NUL-terminated pathname in mapped memory.
 * Returns a pointer to a NUL-terminated string, or an ERR_PTR. The caller
 * must free the returned string.
 */
char *svc_fill_symlink_pathname(struct svc_rqst *rqstp, struct kvec *first,
				void *p, size_t total)
{
	size_t len, remaining;
	char *result, *dst;

	result = kmalloc(total + 1, GFP_KERNEL);
	if (!result)
		return ERR_PTR(-ESERVERFAULT);

	dst = result;
	remaining = total;

	len = min_t(size_t, total, first->iov_len);
	if (len) {
		memcpy(dst, first->iov_base, len);
		dst += len;
		remaining -= len;
	}

	if (remaining) {
		/* @p holds at most one page; anything longer is caught
		 * by the length check below.
		 */
		len = min_t(size_t, remaining, PAGE_SIZE);
		memcpy(dst, p, len);
		dst += len;
	}

	*dst = '\0';

	/* Sanity check: Linux doesn't allow the pathname argument to
	 * contain a NUL byte.
	 */
	if (strlen(result) != total) {
		kfree(result);
		return ERR_PTR(-EINVAL);
	}
	return result;
}
EXPORT_SYMBOL_GPL(svc_fill_symlink_pathname);
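
#if 0	/* Illustrative sketch only -- not compiled */
/*
 * A hypothetical symlink path: assemble the NUL-terminated target with
 * svc_fill_symlink_pathname(), hand it to the VFS, then free it.
 * example_create_symlink() and its argument list are invented; real
 * callers also perform permission and export checks.
 */
static int example_create_symlink(struct svc_rqst *rqstp, struct kvec *first,
				  void *rest, size_t len,
				  struct inode *dir, struct dentry *dentry)
{
	char *path;
	int err;

	path = svc_fill_symlink_pathname(rqstp, first, rest, len);
	if (IS_ERR(path))
		return PTR_ERR(path);

	err = vfs_symlink(dir, dentry, path);
	kfree(path);
	return err;
}
#endif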