// SPDX-License-Identifier: GPL-2.0
/*
 * Central processing for nfsd.
 *
 * Authors:	Olaf Kirch (okir@monad.swb.de)
 *
 * Copyright (C) 1995, 1996, 1997 Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/sched/signal.h>
#include <linux/freezer.h>
#include <linux/module.h>
#include <linux/fs_struct.h>
#include <linux/swap.h>

#include <linux/sunrpc/stats.h>
#include <linux/sunrpc/svcsock.h>
#include <linux/sunrpc/svc_xprt.h>
#include <linux/lockd/bind.h>
#include <linux/nfsacl.h>
#include <linux/seq_file.h>
#include <linux/inetdevice.h>
#include <net/addrconf.h>
#include <net/ipv6.h>
#include <net/net_namespace.h>
#include "nfsd.h"
#include "cache.h"
#include "vfs.h"
#include "netns.h"
#include "filecache.h"

#include "trace.h"

#define NFSDDBG_FACILITY	NFSDDBG_SVC

extern struct svc_program	nfsd_program;
static int			nfsd(void *vrqstp);
#if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL)
static int			nfsd_acl_rpcbind_set(struct net *,
						     const struct svc_program *,
						     u32, int,
						     unsigned short,
						     unsigned short);
static __be32			nfsd_acl_init_request(struct svc_rqst *,
						      const struct svc_program *,
						      struct svc_process_info *);
#endif
static int			nfsd_rpcbind_set(struct net *,
						 const struct svc_program *,
						 u32, int,
						 unsigned short,
						 unsigned short);
static __be32			nfsd_init_request(struct svc_rqst *,
						  const struct svc_program *,
						  struct svc_process_info *);

/*
 * nfsd_mutex protects nn->nfsd_serv -- both the pointer itself and some members
 * of the svc_serv struct such as ->sv_temp_socks and ->sv_permsocks.
 *
 * If (outside the lock) nn->nfsd_serv is non-NULL, then it must point to a
 * properly initialised 'struct svc_serv' with ->sv_nrthreads > 0 (unless
 * nn->keep_active is set).  That number of nfsd threads must
 * exist and each must be listed in ->sp_all_threads in some entry of
 * ->sv_pools[].
 *
 * Each active thread holds a counted reference on nn->nfsd_serv, as does
 * the nn->keep_active flag and various transient calls to svc_get().
 *
 * Finally, the nfsd_mutex also protects some of the global variables that are
 * accessed when nfsd starts and that are settable via the write_* routines in
 * nfsctl.c. In particular:
 *
 *	user_recovery_dirname
 *	user_lease_time
 *	nfsd_versions
 */
DEFINE_MUTEX(nfsd_mutex);

/*
 * nfsd_drc_lock protects nfsd_drc_max_pages and nfsd_drc_pages_used.
 * nfsd_drc_max_pages limits the total amount of memory available for
 * version 4.1 DRC caches.
 * nfsd_drc_pages_used tracks the current version 4.1 DRC memory usage.
 */
DEFINE_SPINLOCK(nfsd_drc_lock);
unsigned long	nfsd_drc_max_mem;
unsigned long	nfsd_drc_mem_used;

#if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL)
static struct svc_stat	nfsd_acl_svcstats;
static const struct svc_version *nfsd_acl_version[] = {
	[2] = &nfsd_acl_version2,
	[3] = &nfsd_acl_version3,
};

#define NFSD_ACL_MINVERS	2
#define NFSD_ACL_NRVERS		ARRAY_SIZE(nfsd_acl_version)

static struct svc_program	nfsd_acl_program = {
	.pg_prog		= NFS_ACL_PROGRAM,
	.pg_nvers		= NFSD_ACL_NRVERS,
	.pg_vers		= nfsd_acl_version,
	.pg_name		= "nfsacl",
	.pg_class		= "nfsd",
	.pg_stats		= &nfsd_acl_svcstats,
	.pg_authenticate	= &svc_set_client,
	.pg_init_request	= nfsd_acl_init_request,
	.pg_rpcbind_set		= nfsd_acl_rpcbind_set,
};

static struct svc_stat	nfsd_acl_svcstats = {
	.program	= &nfsd_acl_program,
};
#endif /* defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL) */

static const struct svc_version *nfsd_version[] = {
	[2] = &nfsd_version2,
#if defined(CONFIG_NFSD_V3)
	[3] = &nfsd_version3,
#endif
#if defined(CONFIG_NFSD_V4)
	[4] = &nfsd_version4,
#endif
};

#define NFSD_MINVERS		2
#define NFSD_NRVERS		ARRAY_SIZE(nfsd_version)

struct svc_program		nfsd_program = {
#if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL)
	.pg_next		= &nfsd_acl_program,
#endif
	.pg_prog		= NFS_PROGRAM,		/* program number */
	.pg_nvers		= NFSD_NRVERS,		/* nr of entries in nfsd_version */
	.pg_vers		= nfsd_version,		/* version table */
	.pg_name		= "nfsd",		/* program name */
	.pg_class		= "nfsd",		/* authentication class */
	.pg_stats		= &nfsd_svcstats,	/* server statistics */
	.pg_authenticate	= &svc_set_client,	/* export authentication */
	.pg_init_request	= nfsd_init_request,
	.pg_rpcbind_set		= nfsd_rpcbind_set,
};

static bool
nfsd_support_version(int vers)
{
	if (vers >= NFSD_MINVERS && vers < NFSD_NRVERS)
		return nfsd_version[vers] != NULL;
	return false;
}

static bool *
nfsd_alloc_versions(void)
{
	bool *vers = kmalloc_array(NFSD_NRVERS, sizeof(bool), GFP_KERNEL);
	unsigned i;

	if (vers) {
		/* All compiled versions are enabled by default */
		for (i = 0; i < NFSD_NRVERS; i++)
			vers[i] = nfsd_support_version(i);
	}
	return vers;
}

static bool *
nfsd_alloc_minorversions(void)
{
	bool *vers = kmalloc_array(NFSD_SUPPORTED_MINOR_VERSION + 1,
				   sizeof(bool), GFP_KERNEL);
	unsigned i;

	if (vers) {
		/* All minor versions are enabled by default */
		for (i = 0; i <= NFSD_SUPPORTED_MINOR_VERSION; i++)
			vers[i] = nfsd_support_version(4);
	}
	return vers;
}

void
nfsd_netns_free_versions(struct nfsd_net *nn)
{
	kfree(nn->nfsd_versions);
	kfree(nn->nfsd4_minorversions);
	nn->nfsd_versions = NULL;
	nn->nfsd4_minorversions = NULL;
}

static void
nfsd_netns_init_versions(struct nfsd_net *nn)
{
	if (!nn->nfsd_versions) {
		nn->nfsd_versions = nfsd_alloc_versions();
		nn->nfsd4_minorversions = nfsd_alloc_minorversions();
		if (!nn->nfsd_versions || !nn->nfsd4_minorversions)
			nfsd_netns_free_versions(nn);
	}
}

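/*
 * Query or update the per-net table of enabled NFS versions.
 * NFSD_SET and NFSD_CLEAR enable or disable a version, NFSD_TEST reports
 * whether it is currently enabled, and NFSD_AVAIL reports whether support
 * for it was compiled in at all.
 */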
int nfsd_vers(struct nfsd_net *nn, int vers, enum vers_op change)
{
	if (vers < NFSD_MINVERS || vers >= NFSD_NRVERS)
		return 0;
	switch(change) {
	case NFSD_SET:
		if (nn->nfsd_versions)
			nn->nfsd_versions[vers] = nfsd_support_version(vers);
		break;
	case NFSD_CLEAR:
		nfsd_netns_init_versions(nn);
		if (nn->nfsd_versions)
			nn->nfsd_versions[vers] = false;
		break;
	case NFSD_TEST:
		if (nn->nfsd_versions)
			return nn->nfsd_versions[vers];
		fallthrough;
	case NFSD_AVAIL:
		return nfsd_support_version(vers);
	}
	return 0;
}

static void
nfsd_adjust_nfsd_versions4(struct nfsd_net *nn)
{
	unsigned i;

	for (i = 0; i <= NFSD_SUPPORTED_MINOR_VERSION; i++) {
		if (nn->nfsd4_minorversions[i])
			return;
	}
	nfsd_vers(nn, 4, NFSD_CLEAR);
}

int nfsd_minorversion(struct nfsd_net *nn, u32 minorversion, enum vers_op change)
{
	if (minorversion > NFSD_SUPPORTED_MINOR_VERSION &&
	    change != NFSD_AVAIL)
		return -1;

	switch(change) {
	case NFSD_SET:
		if (nn->nfsd4_minorversions) {
			nfsd_vers(nn, 4, NFSD_SET);
			nn->nfsd4_minorversions[minorversion] =
				nfsd_vers(nn, 4, NFSD_TEST);
		}
		break;
	case NFSD_CLEAR:
		nfsd_netns_init_versions(nn);
		if (nn->nfsd4_minorversions) {
			nn->nfsd4_minorversions[minorversion] = false;
			nfsd_adjust_nfsd_versions4(nn);
		}
		break;
	case NFSD_TEST:
		if (nn->nfsd4_minorversions)
			return nn->nfsd4_minorversions[minorversion];
		return nfsd_vers(nn, 4, NFSD_TEST);
	case NFSD_AVAIL:
		return minorversion <= NFSD_SUPPORTED_MINOR_VERSION &&
			nfsd_vers(nn, 4, NFSD_AVAIL);
	}
	return 0;
}

/*
 * Maximum number of nfsd processes
 */
#define	NFSD_MAXSERVS		8192

int nfsd_nrthreads(struct net *net)
{
	int rv = 0;
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	mutex_lock(&nfsd_mutex);
	if (nn->nfsd_serv)
		rv = nn->nfsd_serv->sv_nrthreads;
	mutex_unlock(&nfsd_mutex);
	return rv;
}

static int nfsd_init_socks(struct net *net, const struct cred *cred)
{
	int error;
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	if (!list_empty(&nn->nfsd_serv->sv_permsocks))
		return 0;

	error = svc_create_xprt(nn->nfsd_serv, "udp", net, PF_INET, NFS_PORT,
					SVC_SOCK_DEFAULTS, cred);
	if (error < 0)
		return error;

	error = svc_create_xprt(nn->nfsd_serv, "tcp", net, PF_INET, NFS_PORT,
					SVC_SOCK_DEFAULTS, cred);
	if (error < 0)
		return error;

	return 0;
}

static int nfsd_users = 0;

static int nfsd_startup_generic(void)
{
	int ret;

	if (nfsd_users++)
		return 0;

	ret = nfsd_file_cache_init();
	if (ret)
		goto dec_users;

	ret = nfs4_state_start();
	if (ret)
		goto out_file_cache;
	return 0;

out_file_cache:
	nfsd_file_cache_shutdown();
dec_users:
	nfsd_users--;
	return ret;
}

static void nfsd_shutdown_generic(void)
{
	if (--nfsd_users)
		return;

	nfs4_state_shutdown();
	nfsd_file_cache_shutdown();
}

static bool nfsd_needs_lockd(struct nfsd_net *nn)
{
	return nfsd_vers(nn, 2, NFSD_TEST) || nfsd_vers(nn, 3, NFSD_TEST);
}

void nfsd_copy_boot_verifier(__be32 verf[2], struct nfsd_net *nn)
{
	int seq = 0;

	do {
		read_seqbegin_or_lock(&nn->boot_lock, &seq);
		/*
		 * This is opaque to client, so no need to byte-swap. Use
		 * __force to keep sparse happy. y2038 time_t overflow is
		 * irrelevant in this usage
		 */
		verf[0] = (__force __be32)nn->nfssvc_boot.tv_sec;
		verf[1] = (__force __be32)nn->nfssvc_boot.tv_nsec;
	} while (need_seqretry(&nn->boot_lock, seq));
	done_seqretry(&nn->boot_lock, seq);
}

static void nfsd_reset_boot_verifier_locked(struct nfsd_net *nn)
{
	ktime_get_real_ts64(&nn->nfssvc_boot);
}

void nfsd_reset_boot_verifier(struct nfsd_net *nn)
{
	write_seqlock(&nn->boot_lock);
	nfsd_reset_boot_verifier_locked(nn);
	write_sequnlock(&nn->boot_lock);
}

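/*
 * Bring up everything this namespace needs to serve requests: global
 * state, the listening sockets, lockd (when v2/v3 is enabled), the
 * per-net file cache and NFSv4 state.  Returns immediately if
 * nn->nfsd_net_up is already set.
 */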
static int nfsd_startup_net(struct net *net, const struct cred *cred)
{
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
	int ret;

	if (nn->nfsd_net_up)
		return 0;

	ret = nfsd_startup_generic();
	if (ret)
		return ret;
	ret = nfsd_init_socks(net, cred);
	if (ret)
		goto out_socks;

	if (nfsd_needs_lockd(nn) && !nn->lockd_up) {
		ret = lockd_up(net, cred);
		if (ret)
			goto out_socks;
		nn->lockd_up = true;
	}

	ret = nfsd_file_cache_start_net(net);
	if (ret)
		goto out_lockd;
	ret = nfs4_state_start_net(net);
	if (ret)
		goto out_filecache;

#ifdef CONFIG_NFSD_V4_2_INTER_SSC
	nfsd4_ssc_init_umount_work(nn);
#endif
	nn->nfsd_net_up = true;
	return 0;

out_filecache:
	nfsd_file_cache_shutdown_net(net);
out_lockd:
	if (nn->lockd_up) {
		lockd_down(net);
		nn->lockd_up = false;
	}
out_socks:
	nfsd_shutdown_generic();
	return ret;
}

static void nfsd_shutdown_net(struct net *net)
{
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	nfsd_file_cache_shutdown_net(net);
	nfs4_state_shutdown_net(net);
	if (nn->lockd_up) {
		lockd_down(net);
		nn->lockd_up = false;
	}
	nn->nfsd_net_up = false;
	nfsd_shutdown_generic();
}

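/*
 * nfsd_notifier_lock keeps the address notifiers below from racing with
 * nn->nfsd_serv being set up or torn down.  When an interface address is
 * removed, the notifiers close any temporary transports still bound to it.
 */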
static DEFINE_SPINLOCK(nfsd_notifier_lock);
static int nfsd_inetaddr_event(struct notifier_block *this, unsigned long event,
	void *ptr)
{
	struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
	struct net_device *dev = ifa->ifa_dev->dev;
	struct net *net = dev_net(dev);
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
	struct sockaddr_in sin;

	if (event != NETDEV_DOWN || !nn->nfsd_serv)
		goto out;

	spin_lock(&nfsd_notifier_lock);
	if (nn->nfsd_serv) {
		dprintk("nfsd_inetaddr_event: removed %pI4\n", &ifa->ifa_local);
		sin.sin_family = AF_INET;
		sin.sin_addr.s_addr = ifa->ifa_local;
		svc_age_temp_xprts_now(nn->nfsd_serv, (struct sockaddr *)&sin);
	}
	spin_unlock(&nfsd_notifier_lock);

out:
	return NOTIFY_DONE;
}

static struct notifier_block nfsd_inetaddr_notifier = {
	.notifier_call = nfsd_inetaddr_event,
};

#if IS_ENABLED(CONFIG_IPV6)
static int nfsd_inet6addr_event(struct notifier_block *this,
	unsigned long event, void *ptr)
{
	struct inet6_ifaddr *ifa = (struct inet6_ifaddr *)ptr;
	struct net_device *dev = ifa->idev->dev;
	struct net *net = dev_net(dev);
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
	struct sockaddr_in6 sin6;

	if (event != NETDEV_DOWN || !nn->nfsd_serv)
		goto out;

	spin_lock(&nfsd_notifier_lock);
	if (nn->nfsd_serv) {
		dprintk("nfsd_inet6addr_event: removed %pI6\n", &ifa->addr);
		sin6.sin6_family = AF_INET6;
		sin6.sin6_addr = ifa->addr;
		if (ipv6_addr_type(&sin6.sin6_addr) & IPV6_ADDR_LINKLOCAL)
			sin6.sin6_scope_id = ifa->idev->dev->ifindex;
		svc_age_temp_xprts_now(nn->nfsd_serv, (struct sockaddr *)&sin6);
	}
	spin_unlock(&nfsd_notifier_lock);

out:
	return NOTIFY_DONE;
}

static struct notifier_block nfsd_inet6addr_notifier = {
	.notifier_call = nfsd_inet6addr_event,
};
#endif

/* Only used under nfsd_mutex, so this atomic may be overkill: */
static atomic_t nfsd_notifier_refcount = ATOMIC_INIT(0);

static void nfsd_last_thread(struct svc_serv *serv, struct net *net)
{
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	/* check if the notifier still has clients */
	if (atomic_dec_return(&nfsd_notifier_refcount) == 0) {
		unregister_inetaddr_notifier(&nfsd_inetaddr_notifier);
#if IS_ENABLED(CONFIG_IPV6)
		unregister_inet6addr_notifier(&nfsd_inet6addr_notifier);
#endif
	}

	/*
	 * write_ports can create the server without actually starting
	 * any threads--if we get shut down before any threads are
	 * started, then nfsd_last_thread will be run before any of this
	 * other initialization has been done except the rpcb information.
	 */
	svc_rpcb_cleanup(serv, net);
	if (!nn->nfsd_net_up)
		return;

	nfsd_shutdown_net(net);
	pr_info("nfsd: last server has exited, flushing export cache\n");
	nfsd_export_flush(net);
}

void nfsd_reset_versions(struct nfsd_net *nn)
{
	int i;

	for (i = 0; i < NFSD_NRVERS; i++)
		if (nfsd_vers(nn, i, NFSD_TEST))
			return;

	for (i = 0; i < NFSD_NRVERS; i++)
		if (i != 4)
			nfsd_vers(nn, i, NFSD_SET);
		else {
			int minor = 0;
			while (nfsd_minorversion(nn, minor, NFSD_SET) >= 0)
				minor++;
		}
}

/*
 * Each session guarantees a negotiated per slot memory cache for replies
 * which in turn consumes memory beyond the v2/v3/v4.0 server. A dedicated
 * NFSv4.1 server might want to use more memory for a DRC than a machine
 * with multiple services.
 *
 * Impose a hard limit on the number of pages for the DRC which varies
 * according to the machine's free pages. This is of course only a default.
 *
 * For now this is a #defined shift which could be under admin control
 * in the future.
 */
static void set_max_drc(void)
{
	#define NFSD_DRC_SIZE_SHIFT	7
	nfsd_drc_max_mem = (nr_free_buffer_pages()
					>> NFSD_DRC_SIZE_SHIFT) * PAGE_SIZE;
	nfsd_drc_mem_used = 0;
	dprintk("%s nfsd_drc_max_mem %lu \n", __func__, nfsd_drc_max_mem);
}

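/* Pick a default maximum RPC payload size scaled to available low memory. */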
static int nfsd_get_default_max_blksize(void)
{
	struct sysinfo i;
	unsigned long long target;
	unsigned long ret;

	si_meminfo(&i);
	target = (i.totalram - i.totalhigh) << PAGE_SHIFT;
	/*
	 * Aim for 1/4096 of memory per thread.  This gives 1MB on 4Gig
	 * machines, but only uses 32K on 128M machines.  Bottom out at
	 * 8K on 32M and smaller.  Of course, this is only a default.
	 */
	target >>= 12;

	ret = NFSSVC_MAXBLKSIZE;
	while (ret > target && ret >= 8*1024*2)
		ret /= 2;
	return ret;
}

static const struct svc_serv_ops nfsd_thread_sv_ops = {
	.svo_shutdown		= nfsd_last_thread,
	.svo_function		= nfsd,
	.svo_enqueue_xprt	= svc_xprt_do_enqueue,
	.svo_module		= THIS_MODULE,
};

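/*
 * Shut down all nfsd threads for this namespace: take nfsd_mutex, ask for
 * zero threads, then drop the reference taken here (which may be the last
 * one, destroying the svc_serv).
 */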
void nfsd_shutdown_threads(struct net *net)
{
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
	struct svc_serv *serv;

	mutex_lock(&nfsd_mutex);
	serv = nn->nfsd_serv;
	if (serv == NULL) {
		mutex_unlock(&nfsd_mutex);
		return;
	}

	svc_get(serv);
	/* Kill outstanding nfsd threads */
	svc_set_num_threads(serv, NULL, 0);
	nfsd_put(net);
	mutex_unlock(&nfsd_mutex);
}

bool i_am_nfsd(void)
{
	return kthread_func(current) == nfsd;
}

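/*
 * Create the svc_serv for this namespace, or just take another reference
 * if it already exists.  Caller must hold nfsd_mutex.  On first creation
 * this also sizes the DRC, registers the address notifiers and resets the
 * boot verifier.
 */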
int nfsd_create_serv(struct net *net)
{
	int error;
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
	struct svc_serv *serv;

	WARN_ON(!mutex_is_locked(&nfsd_mutex));
	if (nn->nfsd_serv) {
		svc_get(nn->nfsd_serv);
		return 0;
	}
	if (nfsd_max_blksize == 0)
		nfsd_max_blksize = nfsd_get_default_max_blksize();
	nfsd_reset_versions(nn);
	serv = svc_create_pooled(&nfsd_program, nfsd_max_blksize,
				 &nfsd_thread_sv_ops);
	if (serv == NULL)
		return -ENOMEM;

	serv->sv_maxconn = nn->max_connections;
	error = svc_bind(serv, net);
	if (error < 0) {
		/* NOT nfsd_put() as notifiers (see below) haven't
		 * been set up yet.
		 */
		svc_put(serv);
		return error;
	}
	spin_lock(&nfsd_notifier_lock);
	nn->nfsd_serv = serv;
	spin_unlock(&nfsd_notifier_lock);

	set_max_drc();
	/* check if the notifier is already set */
	if (atomic_inc_return(&nfsd_notifier_refcount) == 1) {
		register_inetaddr_notifier(&nfsd_inetaddr_notifier);
#if IS_ENABLED(CONFIG_IPV6)
		register_inet6addr_notifier(&nfsd_inet6addr_notifier);
#endif
	}
	nfsd_reset_boot_verifier(nn);
	return 0;
}

int nfsd_nrpools(struct net *net)
{
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	if (nn->nfsd_serv == NULL)
		return 0;
	else
		return nn->nfsd_serv->sv_nrpools;
}

int nfsd_get_nrthreads(int n, int *nthreads, struct net *net)
{
	int i = 0;
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	if (nn->nfsd_serv != NULL) {
		for (i = 0; i < nn->nfsd_serv->sv_nrpools && i < n; i++)
			nthreads[i] = nn->nfsd_serv->sv_pools[i].sp_nrthreads;
	}

	return 0;
}

/* This is the callback for kref_put() below.
 * There is no code here as the first thing to be done is
 * call svc_shutdown_net(), but we cannot get the 'net' from
 * the kref.  So do all the work when kref_put returns true.
 */
static void nfsd_noop(struct kref *ref)
{
}

void nfsd_put(struct net *net)
{
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	if (kref_put(&nn->nfsd_serv->sv_refcnt, nfsd_noop)) {
		svc_shutdown_net(nn->nfsd_serv, net);
		svc_destroy(&nn->nfsd_serv->sv_refcnt);
		spin_lock(&nfsd_notifier_lock);
		nn->nfsd_serv = NULL;
		spin_unlock(&nfsd_notifier_lock);
	}
}

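/*
 * Apply an admin-requested set of per-pool thread counts: clamp each pool
 * and the total to NFSD_MAXSERVS, keep at least one thread in pool 0, then
 * resize each pool in turn.
 */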
int nfsd_set_nrthreads(int n, int *nthreads, struct net *net)
{
	int i = 0;
	int tot = 0;
	int err = 0;
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	WARN_ON(!mutex_is_locked(&nfsd_mutex));

	if (nn->nfsd_serv == NULL || n <= 0)
		return 0;

	if (n > nn->nfsd_serv->sv_nrpools)
		n = nn->nfsd_serv->sv_nrpools;

	/* enforce a global maximum number of threads */
	tot = 0;
	for (i = 0; i < n; i++) {
		nthreads[i] = min(nthreads[i], NFSD_MAXSERVS);
		tot += nthreads[i];
	}
	if (tot > NFSD_MAXSERVS) {
		/* total too large: scale down requested numbers */
		for (i = 0; i < n && tot > 0; i++) {
			int new = nthreads[i] * NFSD_MAXSERVS / tot;
			tot -= (nthreads[i] - new);
			nthreads[i] = new;
		}
		for (i = 0; i < n && tot > 0; i++) {
			nthreads[i]--;
			tot--;
		}
	}

	/*
	 * There must always be a thread in pool 0; the admin
	 * can't shut down NFS completely using pool_threads.
	 */
	if (nthreads[0] == 0)
		nthreads[0] = 1;

	/* apply the new numbers */
	svc_get(nn->nfsd_serv);
	for (i = 0; i < n; i++) {
		err = svc_set_num_threads(nn->nfsd_serv,
					  &nn->nfsd_serv->sv_pools[i],
					  nthreads[i]);
		if (err)
			break;
	}
	nfsd_put(net);
	return err;
}

/*
 * Adjust the number of threads and return the new number of threads.
 * This is also the function that starts the server if necessary, if
 * this is the first time nrservs is nonzero.
 */
int
nfsd_svc(int nrservs, struct net *net, const struct cred *cred)
{
	int	error;
	bool	nfsd_up_before;
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	mutex_lock(&nfsd_mutex);
	dprintk("nfsd: creating service\n");

	nrservs = max(nrservs, 0);
	nrservs = min(nrservs, NFSD_MAXSERVS);
	error = 0;

	if (nrservs == 0 && nn->nfsd_serv == NULL)
		goto out;

	strlcpy(nn->nfsd_name, utsname()->nodename,
		sizeof(nn->nfsd_name));

	error = nfsd_create_serv(net);
	if (error)
		goto out;

	nfsd_up_before = nn->nfsd_net_up;

	error = nfsd_startup_net(net, cred);
	if (error)
		goto out_put;
	error = svc_set_num_threads(nn->nfsd_serv, NULL, nrservs);
	if (error)
		goto out_shutdown;
	error = nn->nfsd_serv->sv_nrthreads;
out_shutdown:
	if (error < 0 && !nfsd_up_before)
		nfsd_shutdown_net(net);
out_put:
	/* Threads now hold service active */
	if (xchg(&nn->keep_active, 0))
		nfsd_put(net);
	nfsd_put(net);
out:
	mutex_unlock(&nfsd_mutex);
	return error;
}

#if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL)
static bool
nfsd_support_acl_version(int vers)
{
	if (vers >= NFSD_ACL_MINVERS && vers < NFSD_ACL_NRVERS)
		return nfsd_acl_version[vers] != NULL;
	return false;
}

static int
nfsd_acl_rpcbind_set(struct net *net, const struct svc_program *progp,
		     u32 version, int family, unsigned short proto,
		     unsigned short port)
{
	if (!nfsd_support_acl_version(version) ||
	    !nfsd_vers(net_generic(net, nfsd_net_id), version, NFSD_TEST))
		return 0;
	return svc_generic_rpcbind_set(net, progp, version, family,
			proto, port);
}

static __be32
nfsd_acl_init_request(struct svc_rqst *rqstp,
		      const struct svc_program *progp,
		      struct svc_process_info *ret)
{
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
	int i;

	if (likely(nfsd_support_acl_version(rqstp->rq_vers) &&
	    nfsd_vers(nn, rqstp->rq_vers, NFSD_TEST)))
		return svc_generic_init_request(rqstp, progp, ret);

	ret->mismatch.lovers = NFSD_ACL_NRVERS;
	for (i = NFSD_ACL_MINVERS; i < NFSD_ACL_NRVERS; i++) {
		if (nfsd_support_acl_version(rqstp->rq_vers) &&
		    nfsd_vers(nn, i, NFSD_TEST)) {
			ret->mismatch.lovers = i;
			break;
		}
	}
	if (ret->mismatch.lovers == NFSD_ACL_NRVERS)
		return rpc_prog_unavail;
	ret->mismatch.hivers = NFSD_ACL_MINVERS;
	for (i = NFSD_ACL_NRVERS - 1; i >= NFSD_ACL_MINVERS; i--) {
		if (nfsd_support_acl_version(rqstp->rq_vers) &&
		    nfsd_vers(nn, i, NFSD_TEST)) {
			ret->mismatch.hivers = i;
			break;
		}
	}
	return rpc_prog_mismatch;
}
#endif

static int
nfsd_rpcbind_set(struct net *net, const struct svc_program *progp,
		 u32 version, int family, unsigned short proto,
		 unsigned short port)
{
	if (!nfsd_vers(net_generic(net, nfsd_net_id), version, NFSD_TEST))
		return 0;
	return svc_generic_rpcbind_set(net, progp, version, family,
			proto, port);
}

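/*
 * If the requested version is enabled, hand the request to the generic
 * setup code.  Otherwise report the lowest and highest enabled versions so
 * the client receives a useful PROG_MISMATCH reply.
 */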
static __be32
nfsd_init_request(struct svc_rqst *rqstp,
		  const struct svc_program *progp,
		  struct svc_process_info *ret)
{
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
	int i;

	if (likely(nfsd_vers(nn, rqstp->rq_vers, NFSD_TEST)))
		return svc_generic_init_request(rqstp, progp, ret);

	ret->mismatch.lovers = NFSD_NRVERS;
	for (i = NFSD_MINVERS; i < NFSD_NRVERS; i++) {
		if (nfsd_vers(nn, i, NFSD_TEST)) {
			ret->mismatch.lovers = i;
			break;
		}
	}
	if (ret->mismatch.lovers == NFSD_NRVERS)
		return rpc_prog_unavail;
	ret->mismatch.hivers = NFSD_MINVERS;
	for (i = NFSD_NRVERS - 1; i >= NFSD_MINVERS; i--) {
		if (nfsd_vers(nn, i, NFSD_TEST)) {
			ret->mismatch.hivers = i;
			break;
		}
	}
	return rpc_prog_mismatch;
}

/*
 * This is the NFS server kernel thread
 */
static int
nfsd(void *vrqstp)
{
	struct svc_rqst *rqstp = (struct svc_rqst *) vrqstp;
	struct svc_xprt *perm_sock = list_entry(rqstp->rq_server->sv_permsocks.next, typeof(struct svc_xprt), xpt_list);
	struct net *net = perm_sock->xpt_net;
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
	int err;

	/* At this point, the thread shares current->fs
	 * with the init process. We need to create files with the
	 * umask as defined by the client instead of init's umask. */
	if (unshare_fs_struct() < 0) {
		printk("Unable to start nfsd thread: out of memory\n");
		goto out;
	}

	current->fs->umask = 0;

	/*
	 * thread is spawned with all signals set to SIG_IGN, re-enable
	 * the ones that will bring down the thread
	 */
	allow_signal(SIGKILL);
	allow_signal(SIGHUP);
	allow_signal(SIGINT);
	allow_signal(SIGQUIT);

	atomic_inc(&nfsdstats.th_cnt);

	set_freezable();

	/*
	 * The main request loop
	 */
	for (;;) {
		/* Update sv_maxconn if it has changed */
		rqstp->rq_server->sv_maxconn = nn->max_connections;

		/*
		 * Find a socket with data available and call its
		 * recvfrom routine.
		 */
		while ((err = svc_recv(rqstp, 60*60*HZ)) == -EAGAIN)
			;
		if (err == -EINTR)
			break;
		validate_process_creds();
		svc_process(rqstp);
		validate_process_creds();
	}

	/* Clear signals before calling svc_exit_thread() */
	flush_signals(current);

	atomic_dec(&nfsdstats.th_cnt);

out:
	/* Take an extra ref so that the svc_put in svc_exit_thread()
	 * doesn't call svc_destroy()
	 */
	svc_get(nn->nfsd_serv);

	/* Release the thread */
	svc_exit_thread(rqstp);

	/* We need to drop a ref, but may not drop the last reference
	 * without holding nfsd_mutex, and we cannot wait for nfsd_mutex as that
	 * could deadlock with nfsd_shutdown_threads() waiting for us.
	 * So three options are:
	 * - drop a non-final reference,
	 * - get the mutex without waiting
	 * - sleep briefly and try the above again
	 */
	while (!svc_put_not_last(nn->nfsd_serv)) {
		if (mutex_trylock(&nfsd_mutex)) {
			nfsd_put(net);
			mutex_unlock(&nfsd_mutex);
			break;
		}
		msleep(20);
	}

	/* Release module */
	module_put_and_exit(0);
	return 0;
}

/**
 * nfsd_dispatch - Process an NFS or NFSACL Request
 * @rqstp: incoming request
 * @statp: pointer to location of accept_stat field in RPC Reply buffer
 *
 * This RPC dispatcher integrates the NFS server's duplicate reply cache.
 *
 * Return values:
 *  %0: Processing complete; do not send a Reply
 *  %1: Processing complete; send Reply in rqstp->rq_res
 */
int nfsd_dispatch(struct svc_rqst *rqstp, __be32 *statp)
{
	const struct svc_procedure *proc = rqstp->rq_procinfo;

	/*
	 * Give the xdr decoder a chance to change this if it wants
	 * (necessary in the NFSv4.0 compound case)
	 */
	rqstp->rq_cachetype = proc->pc_cachetype;

	svcxdr_init_decode(rqstp);
	if (!proc->pc_decode(rqstp, &rqstp->rq_arg_stream))
		goto out_decode_err;

	switch (nfsd_cache_lookup(rqstp)) {
	case RC_DOIT:
		break;
	case RC_REPLY:
		goto out_cached_reply;
	case RC_DROPIT:
		goto out_dropit;
	}

	/*
	 * Need to grab the location to store the status, as
	 * NFSv4 does some encoding while processing
	 */
	svcxdr_init_encode(rqstp);

	*statp = proc->pc_func(rqstp);
	if (*statp == rpc_drop_reply || test_bit(RQ_DROPME, &rqstp->rq_flags))
		goto out_update_drop;

	if (!proc->pc_encode(rqstp, &rqstp->rq_res_stream))
		goto out_encode_err;

	nfsd_cache_update(rqstp, rqstp->rq_cachetype, statp + 1);
out_cached_reply:
	return 1;

out_decode_err:
	trace_nfsd_garbage_args_err(rqstp);
	*statp = rpc_garbage_args;
	return 1;

out_update_drop:
	nfsd_cache_update(rqstp, RC_NOCACHE, NULL);
out_dropit:
	return 0;

out_encode_err:
	trace_nfsd_cant_encode_err(rqstp);
	nfsd_cache_update(rqstp, RC_NOCACHE, NULL);
	*statp = rpc_system_err;
	return 1;
}

/**
 * nfssvc_decode_voidarg - Decode void arguments
 * @rqstp: Server RPC transaction context
 * @xdr: XDR stream positioned at arguments to decode
 *
 * Return values:
 *   %false: Arguments were not valid
 *   %true: Decoding was successful
 */
bool nfssvc_decode_voidarg(struct svc_rqst *rqstp, struct xdr_stream *xdr)
{
	return true;
}

/**
 * nfssvc_encode_voidres - Encode void results
 * @rqstp: Server RPC transaction context
 * @xdr: XDR stream into which to encode results
 *
 * Return values:
 *   %false: Local error while encoding
 *   %true: Encoding was successful
 */
bool nfssvc_encode_voidres(struct svc_rqst *rqstp, struct xdr_stream *xdr)
{
	return true;
}

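/*
 * The pool-stats file pins the server while it is open: a reference is
 * taken here and dropped again in nfsd_pool_stats_release().
 */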
int nfsd_pool_stats_open(struct inode *inode, struct file *file)
{
	int ret;
	struct nfsd_net *nn = net_generic(inode->i_sb->s_fs_info, nfsd_net_id);

	mutex_lock(&nfsd_mutex);
	if (nn->nfsd_serv == NULL) {
		mutex_unlock(&nfsd_mutex);
		return -ENODEV;
	}
	svc_get(nn->nfsd_serv);
	ret = svc_pool_stats_open(nn->nfsd_serv, file);
	mutex_unlock(&nfsd_mutex);
	return ret;
}

int nfsd_pool_stats_release(struct inode *inode, struct file *file)
{
	int ret = seq_release(inode, file);
	struct net *net = inode->i_sb->s_fs_info;

	mutex_lock(&nfsd_mutex);
	nfsd_put(net);
	mutex_unlock(&nfsd_mutex);
	return ret;
}