// SPDX-License-Identifier: GPL-2.0
/*
 * Central processing for nfsd.
 *
 * Authors: Olaf Kirch (okir@monad.swb.de)
 *
 * Copyright (C) 1995, 1996, 1997 Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/sched/signal.h>
#include <linux/freezer.h>
#include <linux/module.h>
#include <linux/fs_struct.h>
#include <linux/swap.h>

#include <linux/sunrpc/stats.h>
#include <linux/sunrpc/svcsock.h>
#include <linux/sunrpc/svc_xprt.h>
#include <linux/lockd/bind.h>
#include <linux/nfsacl.h>
#include <linux/seq_file.h>
#include <linux/inetdevice.h>
#include <net/addrconf.h>
#include <net/ipv6.h>
#include <net/net_namespace.h>
#include "nfsd.h"
#include "cache.h"
#include "vfs.h"
#include "netns.h"
#include "filecache.h"

#define NFSDDBG_FACILITY	NFSDDBG_SVC

bool inter_copy_offload_enable;
EXPORT_SYMBOL_GPL(inter_copy_offload_enable);
module_param(inter_copy_offload_enable, bool, 0644);
MODULE_PARM_DESC(inter_copy_offload_enable,
		 "Enable inter server to server copy offload. Default: false");

extern struct svc_program	nfsd_program;
static int	nfsd(void *vrqstp);
#if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL)
static int	nfsd_acl_rpcbind_set(struct net *,
				     const struct svc_program *,
				     u32, int,
				     unsigned short,
				     unsigned short);
static __be32	nfsd_acl_init_request(struct svc_rqst *,
				      const struct svc_program *,
				      struct svc_process_info *);
#endif
static int	nfsd_rpcbind_set(struct net *,
				 const struct svc_program *,
				 u32, int,
				 unsigned short,
				 unsigned short);
static __be32	nfsd_init_request(struct svc_rqst *,
				  const struct svc_program *,
				  struct svc_process_info *);

/*
 * nfsd_mutex protects nn->nfsd_serv -- both the pointer itself and the members
 * of the svc_serv struct. In particular, ->sv_nrthreads, and to some extent
 * ->sv_temp_socks and ->sv_permsocks. It also protects nfsdstats.th_cnt.
 *
 * If (outside the lock) nn->nfsd_serv is non-NULL, then it must point to a
 * properly initialised 'struct svc_serv' with ->sv_nrthreads > 0. That number
 * of nfsd threads must exist and each must be listed in ->sp_all_threads in
 * some entry of ->sv_pools[].
 *
 * Transitions of the thread count between zero and non-zero are of particular
 * interest since the svc_serv needs to be created and initialized at that
 * point, or freed.
 *
 * Finally, the nfsd_mutex also protects some of the global variables that are
 * accessed when nfsd starts and that are settable via the write_* routines in
 * nfsctl.c. In particular:
 *
 *	user_recovery_dirname
 *	user_lease_time
 *	nfsd_versions
 */
DEFINE_MUTEX(nfsd_mutex);
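
/*
 * Editorial usage sketch (not part of the original file): code that touches
 * nn->nfsd_serv follows the pattern
 *
 *	mutex_lock(&nfsd_mutex);
 *	if (nn->nfsd_serv)
 *		... inspect or resize the server ...
 *	mutex_unlock(&nfsd_mutex);
 *
 * as nfsd_nrthreads() and nfsd_svc() below do.
 */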

/*
 * nfsd_drc_lock protects nfsd_drc_max_mem and nfsd_drc_mem_used.
 * nfsd_drc_max_mem limits the total amount of memory available for
 * version 4.1 DRC caches.
 * nfsd_drc_mem_used tracks the current version 4.1 DRC memory usage.
 */
spinlock_t	nfsd_drc_lock;
unsigned long	nfsd_drc_max_mem;
unsigned long	nfsd_drc_mem_used;

#if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL)
static struct svc_stat	nfsd_acl_svcstats;
static const struct svc_version *nfsd_acl_version[] = {
	[2] = &nfsd_acl_version2,
	[3] = &nfsd_acl_version3,
};

#define NFSD_ACL_MINVERS	2
#define NFSD_ACL_NRVERS		ARRAY_SIZE(nfsd_acl_version)

static struct svc_program	nfsd_acl_program = {
	.pg_prog		= NFS_ACL_PROGRAM,
	.pg_nvers		= NFSD_ACL_NRVERS,
	.pg_vers		= nfsd_acl_version,
	.pg_name		= "nfsacl",
	.pg_class		= "nfsd",
	.pg_stats		= &nfsd_acl_svcstats,
	.pg_authenticate	= &svc_set_client,
	.pg_init_request	= nfsd_acl_init_request,
	.pg_rpcbind_set		= nfsd_acl_rpcbind_set,
};

static struct svc_stat	nfsd_acl_svcstats = {
	.program	= &nfsd_acl_program,
};
#endif /* defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL) */

static const struct svc_version *nfsd_version[] = {
	[2] = &nfsd_version2,
#if defined(CONFIG_NFSD_V3)
	[3] = &nfsd_version3,
#endif
#if defined(CONFIG_NFSD_V4)
	[4] = &nfsd_version4,
#endif
};

#define NFSD_MINVERS		2
#define NFSD_NRVERS		ARRAY_SIZE(nfsd_version)

struct svc_program		nfsd_program = {
#if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL)
	.pg_next		= &nfsd_acl_program,
#endif
	.pg_prog		= NFS_PROGRAM,		/* program number */
	.pg_nvers		= NFSD_NRVERS,		/* nr of entries in nfsd_version */
	.pg_vers		= nfsd_version,		/* version table */
	.pg_name		= "nfsd",		/* program name */
	.pg_class		= "nfsd",		/* authentication class */
	.pg_stats		= &nfsd_svcstats,	/* server statistics */
	.pg_authenticate	= &svc_set_client,	/* export authentication */
	.pg_init_request	= nfsd_init_request,
	.pg_rpcbind_set		= nfsd_rpcbind_set,
};

static bool
nfsd_support_version(int vers)
{
	if (vers >= NFSD_MINVERS && vers < NFSD_NRVERS)
		return nfsd_version[vers] != NULL;
	return false;
}

static bool *
nfsd_alloc_versions(void)
{
	bool *vers = kmalloc_array(NFSD_NRVERS, sizeof(bool), GFP_KERNEL);
	unsigned i;

	if (vers) {
		/* All compiled versions are enabled by default */
		for (i = 0; i < NFSD_NRVERS; i++)
			vers[i] = nfsd_support_version(i);
	}
	return vers;
}

static bool *
nfsd_alloc_minorversions(void)
{
	bool *vers = kmalloc_array(NFSD_SUPPORTED_MINOR_VERSION + 1,
				   sizeof(bool), GFP_KERNEL);
	unsigned i;

	if (vers) {
		/* All minor versions are enabled by default */
		for (i = 0; i <= NFSD_SUPPORTED_MINOR_VERSION; i++)
			vers[i] = nfsd_support_version(4);
	}
	return vers;
}

void
nfsd_netns_free_versions(struct nfsd_net *nn)
{
	kfree(nn->nfsd_versions);
	kfree(nn->nfsd4_minorversions);
	nn->nfsd_versions = NULL;
	nn->nfsd4_minorversions = NULL;
}

static void
nfsd_netns_init_versions(struct nfsd_net *nn)
{
	if (!nn->nfsd_versions) {
		nn->nfsd_versions = nfsd_alloc_versions();
		nn->nfsd4_minorversions = nfsd_alloc_minorversions();
		if (!nn->nfsd_versions || !nn->nfsd4_minorversions)
			nfsd_netns_free_versions(nn);
	}
}

int nfsd_vers(struct nfsd_net *nn, int vers, enum vers_op change)
{
	if (vers < NFSD_MINVERS || vers >= NFSD_NRVERS)
		return 0;
	switch(change) {
	case NFSD_SET:
		if (nn->nfsd_versions)
			nn->nfsd_versions[vers] = nfsd_support_version(vers);
		break;
	case NFSD_CLEAR:
		nfsd_netns_init_versions(nn);
		if (nn->nfsd_versions)
			nn->nfsd_versions[vers] = false;
		break;
	case NFSD_TEST:
		if (nn->nfsd_versions)
			return nn->nfsd_versions[vers];
		fallthrough;
	case NFSD_AVAIL:
		return nfsd_support_version(vers);
	}
	return 0;
}
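
/*
 * Editorial usage sketch (illustrative, not from the original source),
 * e.g. as driven by the nfsctl version-control writes mentioned above:
 *
 *	nfsd_vers(nn, 3, NFSD_CLEAR);	stop offering NFSv3 in this net ns
 *	nfsd_vers(nn, 4, NFSD_TEST);	non-zero if NFSv4 is currently enabled
 *	nfsd_vers(nn, 4, NFSD_AVAIL);	non-zero whenever NFSv4 is compiled in
 */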

static void
nfsd_adjust_nfsd_versions4(struct nfsd_net *nn)
{
	unsigned i;

	for (i = 0; i <= NFSD_SUPPORTED_MINOR_VERSION; i++) {
		if (nn->nfsd4_minorversions[i])
			return;
	}
	nfsd_vers(nn, 4, NFSD_CLEAR);
}

int nfsd_minorversion(struct nfsd_net *nn, u32 minorversion, enum vers_op change)
{
	if (minorversion > NFSD_SUPPORTED_MINOR_VERSION &&
	    change != NFSD_AVAIL)
		return -1;

	switch(change) {
	case NFSD_SET:
		if (nn->nfsd4_minorversions) {
			nfsd_vers(nn, 4, NFSD_SET);
			nn->nfsd4_minorversions[minorversion] =
				nfsd_vers(nn, 4, NFSD_TEST);
		}
		break;
	case NFSD_CLEAR:
		nfsd_netns_init_versions(nn);
		if (nn->nfsd4_minorversions) {
			nn->nfsd4_minorversions[minorversion] = false;
			nfsd_adjust_nfsd_versions4(nn);
		}
		break;
	case NFSD_TEST:
		if (nn->nfsd4_minorversions)
			return nn->nfsd4_minorversions[minorversion];
		return nfsd_vers(nn, 4, NFSD_TEST);
	case NFSD_AVAIL:
		return minorversion <= NFSD_SUPPORTED_MINOR_VERSION &&
			nfsd_vers(nn, 4, NFSD_AVAIL);
	}
	return 0;
}
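
/*
 * Editorial usage sketch (illustrative only): minor versions are toggled
 * individually, e.g. nfsd_minorversion(nn, 2, NFSD_CLEAR) stops offering
 * NFSv4.2. Clearing the last enabled minor version also clears NFSv4 itself
 * via nfsd_adjust_nfsd_versions4(), and NFSD_SET re-enables NFSv4 as a side
 * effect.
 */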

/*
 * Maximum number of nfsd processes
 */
#define	NFSD_MAXSERVS		8192

int nfsd_nrthreads(struct net *net)
{
	int rv = 0;
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	mutex_lock(&nfsd_mutex);
	if (nn->nfsd_serv)
		rv = nn->nfsd_serv->sv_nrthreads;
	mutex_unlock(&nfsd_mutex);
	return rv;
}

static int nfsd_init_socks(struct net *net, const struct cred *cred)
{
	int error;
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	if (!list_empty(&nn->nfsd_serv->sv_permsocks))
		return 0;

	error = svc_create_xprt(nn->nfsd_serv, "udp", net, PF_INET, NFS_PORT,
				SVC_SOCK_DEFAULTS, cred);
	if (error < 0)
		return error;

	error = svc_create_xprt(nn->nfsd_serv, "tcp", net, PF_INET, NFS_PORT,
				SVC_SOCK_DEFAULTS, cred);
	if (error < 0)
		return error;

	return 0;
}

static int nfsd_users = 0;

static int nfsd_startup_generic(int nrservs)
{
	int ret;

	if (nfsd_users++)
		return 0;

	ret = nfsd_file_cache_init();
	if (ret)
		goto dec_users;

	ret = nfs4_state_start();
	if (ret)
		goto out_file_cache;
	return 0;

out_file_cache:
	nfsd_file_cache_shutdown();
dec_users:
	nfsd_users--;
	return ret;
}

static void nfsd_shutdown_generic(void)
{
	if (--nfsd_users)
		return;

	nfs4_state_shutdown();
	nfsd_file_cache_shutdown();
}

static bool nfsd_needs_lockd(struct nfsd_net *nn)
{
	return nfsd_vers(nn, 2, NFSD_TEST) || nfsd_vers(nn, 3, NFSD_TEST);
}

void nfsd_copy_boot_verifier(__be32 verf[2], struct nfsd_net *nn)
{
	int seq = 0;

	do {
		read_seqbegin_or_lock(&nn->boot_lock, &seq);
		/*
		 * This is opaque to the client, so no need to byte-swap. Use
		 * __force to keep sparse happy. y2038 time_t overflow is
		 * irrelevant in this usage.
		 */
		verf[0] = (__force __be32)nn->nfssvc_boot.tv_sec;
		verf[1] = (__force __be32)nn->nfssvc_boot.tv_nsec;
	} while (need_seqretry(&nn->boot_lock, seq));
	done_seqretry(&nn->boot_lock, seq);
}

static void nfsd_reset_boot_verifier_locked(struct nfsd_net *nn)
{
	ktime_get_real_ts64(&nn->nfssvc_boot);
}

void nfsd_reset_boot_verifier(struct nfsd_net *nn)
{
	write_seqlock(&nn->boot_lock);
	nfsd_reset_boot_verifier_locked(nn);
	write_sequnlock(&nn->boot_lock);
}

static int nfsd_startup_net(int nrservs, struct net *net, const struct cred *cred)
{
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
	int ret;

	if (nn->nfsd_net_up)
		return 0;

	ret = nfsd_startup_generic(nrservs);
	if (ret)
		return ret;
	ret = nfsd_init_socks(net, cred);
	if (ret)
		goto out_socks;

	if (nfsd_needs_lockd(nn) && !nn->lockd_up) {
		ret = lockd_up(net, cred);
		if (ret)
			goto out_socks;
		nn->lockd_up = true;
	}

	ret = nfsd_file_cache_start_net(net);
	if (ret)
		goto out_lockd;
	ret = nfs4_state_start_net(net);
	if (ret)
		goto out_filecache;

	nn->nfsd_net_up = true;
	return 0;

out_filecache:
	nfsd_file_cache_shutdown_net(net);
out_lockd:
	if (nn->lockd_up) {
		lockd_down(net);
		nn->lockd_up = false;
	}
out_socks:
	nfsd_shutdown_generic();
	return ret;
}

static void nfsd_shutdown_net(struct net *net)
{
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	nfsd_file_cache_shutdown_net(net);
	nfs4_state_shutdown_net(net);
	if (nn->lockd_up) {
		lockd_down(net);
		nn->lockd_up = false;
	}
	nn->nfsd_net_up = false;
	nfsd_shutdown_generic();
}

static int nfsd_inetaddr_event(struct notifier_block *this, unsigned long event,
			       void *ptr)
{
	struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
	struct net_device *dev = ifa->ifa_dev->dev;
	struct net *net = dev_net(dev);
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
	struct sockaddr_in sin;

	if ((event != NETDEV_DOWN) ||
	    !atomic_inc_not_zero(&nn->ntf_refcnt))
		goto out;

	if (nn->nfsd_serv) {
		dprintk("nfsd_inetaddr_event: removed %pI4\n", &ifa->ifa_local);
		sin.sin_family = AF_INET;
		sin.sin_addr.s_addr = ifa->ifa_local;
		svc_age_temp_xprts_now(nn->nfsd_serv, (struct sockaddr *)&sin);
	}
	atomic_dec(&nn->ntf_refcnt);
	wake_up(&nn->ntf_wq);

out:
	return NOTIFY_DONE;
}

static struct notifier_block nfsd_inetaddr_notifier = {
	.notifier_call = nfsd_inetaddr_event,
};

#if IS_ENABLED(CONFIG_IPV6)
static int nfsd_inet6addr_event(struct notifier_block *this,
				unsigned long event, void *ptr)
{
	struct inet6_ifaddr *ifa = (struct inet6_ifaddr *)ptr;
	struct net_device *dev = ifa->idev->dev;
	struct net *net = dev_net(dev);
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
	struct sockaddr_in6 sin6;

	if ((event != NETDEV_DOWN) ||
	    !atomic_inc_not_zero(&nn->ntf_refcnt))
		goto out;

	if (nn->nfsd_serv) {
		dprintk("nfsd_inet6addr_event: removed %pI6\n", &ifa->addr);
		sin6.sin6_family = AF_INET6;
		sin6.sin6_addr = ifa->addr;
		if (ipv6_addr_type(&sin6.sin6_addr) & IPV6_ADDR_LINKLOCAL)
			sin6.sin6_scope_id = ifa->idev->dev->ifindex;
		svc_age_temp_xprts_now(nn->nfsd_serv, (struct sockaddr *)&sin6);
	}
	atomic_dec(&nn->ntf_refcnt);
	wake_up(&nn->ntf_wq);
out:
	return NOTIFY_DONE;
}

static struct notifier_block nfsd_inet6addr_notifier = {
	.notifier_call = nfsd_inet6addr_event,
};
#endif

/* Only used under nfsd_mutex, so this atomic may be overkill: */
static atomic_t nfsd_notifier_refcount = ATOMIC_INIT(0);

static void nfsd_last_thread(struct svc_serv *serv, struct net *net)
{
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	atomic_dec(&nn->ntf_refcnt);
	/* check if the notifier still has clients */
	if (atomic_dec_return(&nfsd_notifier_refcount) == 0) {
		unregister_inetaddr_notifier(&nfsd_inetaddr_notifier);
#if IS_ENABLED(CONFIG_IPV6)
		unregister_inet6addr_notifier(&nfsd_inet6addr_notifier);
#endif
	}
	wait_event(nn->ntf_wq, atomic_read(&nn->ntf_refcnt) == 0);

	/*
	 * write_ports can create the server without actually starting
	 * any threads--if we get shut down before any threads are
	 * started, then nfsd_last_thread will be run before any of this
	 * other initialization has been done except the rpcb information.
	 */
	svc_rpcb_cleanup(serv, net);
	if (!nn->nfsd_net_up)
		return;

	nfsd_shutdown_net(net);
	pr_info("nfsd: last server has exited, flushing export cache\n");
	nfsd_export_flush(net);
}

void nfsd_reset_versions(struct nfsd_net *nn)
{
	int i;

	for (i = 0; i < NFSD_NRVERS; i++)
		if (nfsd_vers(nn, i, NFSD_TEST))
			return;

	for (i = 0; i < NFSD_NRVERS; i++)
		if (i != 4)
			nfsd_vers(nn, i, NFSD_SET);
		else {
			int minor = 0;
			while (nfsd_minorversion(nn, minor, NFSD_SET) >= 0)
				minor++;
		}
}
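
/*
 * Editorial note (not from the original source): nfsd_reset_versions() is a
 * no-op while at least one version is still enabled; otherwise it re-enables
 * every compiled-in version and, for NFSv4, every supported minor version.
 */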

/*
 * Each session guarantees a negotiated per-slot memory cache for replies
 * which in turn consumes memory beyond the v2/v3/v4.0 server. A dedicated
 * NFSv4.1 server might want to use more memory for a DRC than a machine
 * with multiple services.
 *
 * Impose a hard limit on the number of pages for the DRC which varies
 * according to the machine's free pages. This is of course only a default.
 *
 * For now this is a #defined shift which could be under admin control
 * in the future.
 */
static void set_max_drc(void)
{
	#define NFSD_DRC_SIZE_SHIFT	7
	nfsd_drc_max_mem = (nr_free_buffer_pages()
				>> NFSD_DRC_SIZE_SHIFT) * PAGE_SIZE;
	nfsd_drc_mem_used = 0;
	spin_lock_init(&nfsd_drc_lock);
	dprintk("%s nfsd_drc_max_mem %lu \n", __func__, nfsd_drc_max_mem);
}
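
/*
 * Worked example for set_max_drc() (illustrative, not from the original
 * source): with NFSD_DRC_SIZE_SHIFT == 7 the DRC is capped at 1/128 of the
 * free buffer pages. On a machine with 1 GiB of free buffer pages (262144
 * pages of 4 KiB), nfsd_drc_max_mem = (262144 >> 7) * 4096 = 8 MiB.
 */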

static int nfsd_get_default_max_blksize(void)
{
	struct sysinfo i;
	unsigned long long target;
	unsigned long ret;

	si_meminfo(&i);
	target = (i.totalram - i.totalhigh) << PAGE_SHIFT;
	/*
	 * Aim for 1/4096 of memory per thread. This gives 1MB on 4Gig
	 * machines, but only uses 32K on 128M machines. Bottom out at
	 * 8K on 32M and smaller. Of course, this is only a default.
	 */
	target >>= 12;

	ret = NFSSVC_MAXBLKSIZE;
	while (ret > target && ret >= 8*1024*2)
		ret /= 2;
	return ret;
}
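
/*
 * Worked example for nfsd_get_default_max_blksize() (illustrative): target is
 * low memory divided by 4096, so 4 GiB of low memory gives target = 1 MiB and
 * ret stays at NFSSVC_MAXBLKSIZE; 128 MiB gives target = 32 KiB and the loop
 * halves ret down to 32 KiB; on 32 MiB and smaller machines the result
 * bottoms out at 8 KiB because the "ret >= 8*1024*2" test stops further
 * halving.
 */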

static const struct svc_serv_ops nfsd_thread_sv_ops = {
	.svo_shutdown		= nfsd_last_thread,
	.svo_function		= nfsd,
	.svo_enqueue_xprt	= svc_xprt_do_enqueue,
	.svo_setup		= svc_set_num_threads,
	.svo_module		= THIS_MODULE,
};

bool i_am_nfsd(void)
{
	return kthread_func(current) == nfsd;
}

int nfsd_create_serv(struct net *net)
{
	int error;
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	WARN_ON(!mutex_is_locked(&nfsd_mutex));
	if (nn->nfsd_serv) {
		svc_get(nn->nfsd_serv);
		return 0;
	}
	if (nfsd_max_blksize == 0)
		nfsd_max_blksize = nfsd_get_default_max_blksize();
	nfsd_reset_versions(nn);
	nn->nfsd_serv = svc_create_pooled(&nfsd_program, nfsd_max_blksize,
					  &nfsd_thread_sv_ops);
	if (nn->nfsd_serv == NULL)
		return -ENOMEM;

	nn->nfsd_serv->sv_maxconn = nn->max_connections;
	error = svc_bind(nn->nfsd_serv, net);
	if (error < 0) {
		svc_destroy(nn->nfsd_serv);
		return error;
	}

	set_max_drc();
	/* check if the notifier is already set */
	if (atomic_inc_return(&nfsd_notifier_refcount) == 1) {
		register_inetaddr_notifier(&nfsd_inetaddr_notifier);
#if IS_ENABLED(CONFIG_IPV6)
		register_inet6addr_notifier(&nfsd_inet6addr_notifier);
#endif
	}
	atomic_inc(&nn->ntf_refcnt);
	nfsd_reset_boot_verifier(nn);
	return 0;
}

int nfsd_nrpools(struct net *net)
{
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	if (nn->nfsd_serv == NULL)
		return 0;
	else
		return nn->nfsd_serv->sv_nrpools;
}

int nfsd_get_nrthreads(int n, int *nthreads, struct net *net)
{
	int i = 0;
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	if (nn->nfsd_serv != NULL) {
		for (i = 0; i < nn->nfsd_serv->sv_nrpools && i < n; i++)
			nthreads[i] = nn->nfsd_serv->sv_pools[i].sp_nrthreads;
	}

	return 0;
}

void nfsd_destroy(struct net *net)
{
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
	int destroy = (nn->nfsd_serv->sv_nrthreads == 1);

	if (destroy)
		svc_shutdown_net(nn->nfsd_serv, net);
	svc_destroy(nn->nfsd_serv);
	if (destroy)
		nn->nfsd_serv = NULL;
}

int nfsd_set_nrthreads(int n, int *nthreads, struct net *net)
{
	int i = 0;
	int tot = 0;
	int err = 0;
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	WARN_ON(!mutex_is_locked(&nfsd_mutex));

	if (nn->nfsd_serv == NULL || n <= 0)
		return 0;

	if (n > nn->nfsd_serv->sv_nrpools)
		n = nn->nfsd_serv->sv_nrpools;

	/* enforce a global maximum number of threads */
	tot = 0;
	for (i = 0; i < n; i++) {
		nthreads[i] = min(nthreads[i], NFSD_MAXSERVS);
		tot += nthreads[i];
	}
	if (tot > NFSD_MAXSERVS) {
		/* total too large: scale down requested numbers */
		for (i = 0; i < n && tot > 0; i++) {
			int new = nthreads[i] * NFSD_MAXSERVS / tot;
			tot -= (nthreads[i] - new);
			nthreads[i] = new;
		}
		for (i = 0; i < n && tot > 0; i++) {
			nthreads[i]--;
			tot--;
		}
	}

	/*
	 * There must always be a thread in pool 0; the admin
	 * can't shut down NFS completely using pool_threads.
	 */
	if (nthreads[0] == 0)
		nthreads[0] = 1;

	/* apply the new numbers */
	svc_get(nn->nfsd_serv);
	for (i = 0; i < n; i++) {
		err = nn->nfsd_serv->sv_ops->svo_setup(nn->nfsd_serv,
				&nn->nfsd_serv->sv_pools[i], nthreads[i]);
		if (err)
			break;
	}
	nfsd_destroy(net);
	return err;
}

/*
 * Adjust the number of threads and return the new number of threads.
 * This is also the function that starts the server when necessary, i.e.
 * the first time nrservs is nonzero.
 */
int
nfsd_svc(int nrservs, struct net *net, const struct cred *cred)
{
	int error;
	bool nfsd_up_before;
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	mutex_lock(&nfsd_mutex);
	dprintk("nfsd: creating service\n");

	nrservs = max(nrservs, 0);
	nrservs = min(nrservs, NFSD_MAXSERVS);
	error = 0;

	if (nrservs == 0 && nn->nfsd_serv == NULL)
		goto out;

	strlcpy(nn->nfsd_name, utsname()->nodename,
		sizeof(nn->nfsd_name));

	error = nfsd_create_serv(net);
	if (error)
		goto out;

	nfsd_up_before = nn->nfsd_net_up;

	error = nfsd_startup_net(nrservs, net, cred);
	if (error)
		goto out_destroy;
	error = nn->nfsd_serv->sv_ops->svo_setup(nn->nfsd_serv,
			NULL, nrservs);
	if (error)
		goto out_shutdown;
	/* We are holding a reference to nn->nfsd_serv which
	 * we don't want to count in the return value,
	 * so subtract 1.
	 */
	error = nn->nfsd_serv->sv_nrthreads - 1;
out_shutdown:
	if (error < 0 && !nfsd_up_before)
		nfsd_shutdown_net(net);
out_destroy:
	nfsd_destroy(net);		/* Release server */
out:
	mutex_unlock(&nfsd_mutex);
	return error;
}
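
/*
 * Editorial usage note (not from the original source): nfsd_svc() is
 * typically reached when the admin writes a thread count through the nfsctl
 * write_* interfaces mentioned in the header comment. Writing zero while a
 * server exists tears the threads down, which in turn leads to
 * nfsd_last_thread() above running for the last one.
 */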
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) #if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) static bool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) nfsd_support_acl_version(int vers)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) if (vers >= NFSD_ACL_MINVERS && vers < NFSD_ACL_NRVERS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) return nfsd_acl_version[vers] != NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) nfsd_acl_rpcbind_set(struct net *net, const struct svc_program *progp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) u32 version, int family, unsigned short proto,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) unsigned short port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) if (!nfsd_support_acl_version(version) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) !nfsd_vers(net_generic(net, nfsd_net_id), version, NFSD_TEST))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) return svc_generic_rpcbind_set(net, progp, version, family,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) proto, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) static __be32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) nfsd_acl_init_request(struct svc_rqst *rqstp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) const struct svc_program *progp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) struct svc_process_info *ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) if (likely(nfsd_support_acl_version(rqstp->rq_vers) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) nfsd_vers(nn, rqstp->rq_vers, NFSD_TEST)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) return svc_generic_init_request(rqstp, progp, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) ret->mismatch.lovers = NFSD_ACL_NRVERS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) for (i = NFSD_ACL_MINVERS; i < NFSD_ACL_NRVERS; i++) {
		if (nfsd_support_acl_version(i) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) nfsd_vers(nn, i, NFSD_TEST)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) ret->mismatch.lovers = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) if (ret->mismatch.lovers == NFSD_ACL_NRVERS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) return rpc_prog_unavail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) ret->mismatch.hivers = NFSD_ACL_MINVERS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) for (i = NFSD_ACL_NRVERS - 1; i >= NFSD_ACL_MINVERS; i--) {
		if (nfsd_support_acl_version(i) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) nfsd_vers(nn, i, NFSD_TEST)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) ret->mismatch.hivers = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) return rpc_prog_mismatch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841)
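/*
 * rpcbind_set callback for the main NFS program: as above, suppress
 * registration of any NFS version that is currently disabled.
 */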
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) nfsd_rpcbind_set(struct net *net, const struct svc_program *progp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) u32 version, int family, unsigned short proto,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) unsigned short port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) if (!nfsd_vers(net_generic(net, nfsd_net_id), version, NFSD_TEST))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) return svc_generic_rpcbind_set(net, progp, version, family,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) proto, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852)
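/*
 * init_request callback for the main NFS program; the non-ACL
 * counterpart of nfsd_acl_init_request() above.
 */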
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) static __be32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) nfsd_init_request(struct svc_rqst *rqstp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) const struct svc_program *progp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) struct svc_process_info *ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) if (likely(nfsd_vers(nn, rqstp->rq_vers, NFSD_TEST)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) return svc_generic_init_request(rqstp, progp, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) ret->mismatch.lovers = NFSD_NRVERS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) for (i = NFSD_MINVERS; i < NFSD_NRVERS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) if (nfsd_vers(nn, i, NFSD_TEST)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) ret->mismatch.lovers = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) if (ret->mismatch.lovers == NFSD_NRVERS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) return rpc_prog_unavail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) ret->mismatch.hivers = NFSD_MINVERS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) for (i = NFSD_NRVERS - 1; i >= NFSD_MINVERS; i--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) if (nfsd_vers(nn, i, NFSD_TEST)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) ret->mismatch.hivers = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) return rpc_prog_mismatch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) * This is the NFS server kernel thread
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) nfsd(void *vrqstp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) struct svc_rqst *rqstp = (struct svc_rqst *) vrqstp;
	struct svc_xprt *perm_sock = list_entry(rqstp->rq_server->sv_permsocks.next,
						typeof(struct svc_xprt), xpt_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) struct net *net = perm_sock->xpt_net;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) struct nfsd_net *nn = net_generic(net, nfsd_net_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) /* Lock module and set up kernel thread */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) mutex_lock(&nfsd_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897)
	/*
	 * At this point, the thread shares current->fs with the init
	 * process. We need to create files with the umask as defined
	 * by the client instead of init's umask.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) if (unshare_fs_struct() < 0) {
		printk(KERN_WARNING "Unable to start nfsd thread: out of memory\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) current->fs->umask = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) * thread is spawned with all signals set to SIG_IGN, re-enable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) * the ones that will bring down the thread
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) allow_signal(SIGKILL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) allow_signal(SIGHUP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) allow_signal(SIGINT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) allow_signal(SIGQUIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) nfsdstats.th_cnt++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) mutex_unlock(&nfsd_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919)
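	/* Kernel threads are not freezable by default; opt in so suspend can freeze nfsd */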
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) set_freezable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) * The main request loop
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) for (;;) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) /* Update sv_maxconn if it has changed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) rqstp->rq_server->sv_maxconn = nn->max_connections;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) * Find a socket with data available and call its
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) * recvfrom routine.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) while ((err = svc_recv(rqstp, 60*60*HZ)) == -EAGAIN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) if (err == -EINTR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) validate_process_creds();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) svc_process(rqstp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) validate_process_creds();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) /* Clear signals before calling svc_exit_thread() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) flush_signals(current);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) mutex_lock(&nfsd_mutex);
	nfsdstats.th_cnt--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) out:
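	/*
	 * Clear rq_server so that svc_exit_thread() leaves the final
	 * reference on the svc_serv for nfsd_destroy() below, which
	 * drops it while nfsd_mutex is held.
	 */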
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) rqstp->rq_server = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) /* Release the thread */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) svc_exit_thread(rqstp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) nfsd_destroy(net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) /* Release module */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) mutex_unlock(&nfsd_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) module_put_and_exit(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) * A write procedure can have a large argument, and a read procedure can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) * have a large reply, but no NFSv2 or NFSv3 procedure has argument and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) * reply that can both be larger than a page. The xdr code has taken
 * advantage of this assumption to be sloppy about bounds checking in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) * some cases. Pending a rewrite of the NFSv2/v3 xdr code to fix that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) * problem, we enforce these assumptions here:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) static bool nfs_request_too_big(struct svc_rqst *rqstp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) const struct svc_procedure *proc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) * The ACL code has more careful bounds-checking and is not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) * susceptible to this problem:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) if (rqstp->rq_prog != NFS_PROGRAM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) * Ditto NFSv4 (which can in theory have argument and reply both
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) * more than a page):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) if (rqstp->rq_vers >= 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) /* The reply will be small, we're OK: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) if (proc->pc_xdrressize > 0 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) proc->pc_xdrressize < XDR_QUADLEN(PAGE_SIZE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) return rqstp->rq_arg.len > PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) * nfsd_dispatch - Process an NFS or NFSACL Request
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) * @rqstp: incoming request
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) * @statp: pointer to location of accept_stat field in RPC Reply buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) * This RPC dispatcher integrates the NFS server's duplicate reply cache.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) * Return values:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) * %0: Processing complete; do not send a Reply
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) * %1: Processing complete; send Reply in rqstp->rq_res
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) int nfsd_dispatch(struct svc_rqst *rqstp, __be32 *statp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) const struct svc_procedure *proc = rqstp->rq_procinfo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) struct kvec *argv = &rqstp->rq_arg.head[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) struct kvec *resv = &rqstp->rq_res.head[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) __be32 *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) dprintk("nfsd_dispatch: vers %d proc %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) rqstp->rq_vers, rqstp->rq_proc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) if (nfs_request_too_big(rqstp, proc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) goto out_too_large;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) * Give the xdr decoder a chance to change this if it wants
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) * (necessary in the NFSv4.0 compound case)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) rqstp->rq_cachetype = proc->pc_cachetype;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) if (!proc->pc_decode(rqstp, argv->iov_base))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) goto out_decode_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) switch (nfsd_cache_lookup(rqstp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) case RC_DOIT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) case RC_REPLY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) goto out_cached_reply;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) case RC_DROPIT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) goto out_dropit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) * Need to grab the location to store the status, as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) * NFSv4 does some encoding while processing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) p = resv->iov_base + resv->iov_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) resv->iov_len += sizeof(__be32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) *statp = proc->pc_func(rqstp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) if (*statp == rpc_drop_reply || test_bit(RQ_DROPME, &rqstp->rq_flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) goto out_update_drop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) if (!proc->pc_encode(rqstp, p))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) goto out_encode_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047)
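	/* statp + 1 is the start of the procedure-specific reply data */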
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) nfsd_cache_update(rqstp, rqstp->rq_cachetype, statp + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) out_cached_reply:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) out_too_large:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) dprintk("nfsd: NFSv%d argument too large\n", rqstp->rq_vers);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) *statp = rpc_garbage_args;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) out_decode_err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) dprintk("nfsd: failed to decode arguments!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) *statp = rpc_garbage_args;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) out_update_drop:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) dprintk("nfsd: Dropping request; may be revisited later\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) nfsd_cache_update(rqstp, RC_NOCACHE, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) out_dropit:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) out_encode_err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) dprintk("nfsd: failed to encode result!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) nfsd_cache_update(rqstp, RC_NOCACHE, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) *statp = rpc_system_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) int nfsd_pool_stats_open(struct inode *inode, struct file *file)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) struct nfsd_net *nn = net_generic(inode->i_sb->s_fs_info, nfsd_net_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) mutex_lock(&nfsd_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) if (nn->nfsd_serv == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) mutex_unlock(&nfsd_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) }
	/* bump up the pseudo refcount while traversing */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) svc_get(nn->nfsd_serv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) ret = svc_pool_stats_open(nn->nfsd_serv, file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) mutex_unlock(&nfsd_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) int nfsd_pool_stats_release(struct inode *inode, struct file *file)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) int ret = seq_release(inode, file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) struct net *net = inode->i_sb->s_fs_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) mutex_lock(&nfsd_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) /* this function really, really should have been called svc_put() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) nfsd_destroy(net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) mutex_unlock(&nfsd_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) }