// SPDX-License-Identifier: GPL-2.0
/*
 * Central processing for nfsd.
 *
 * Authors:	Olaf Kirch (okir@monad.swb.de)
 *
 * Copyright (C) 1995, 1996, 1997 Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/sched/signal.h>
#include <linux/freezer.h>
#include <linux/module.h>
#include <linux/fs_struct.h>
#include <linux/swap.h>
#include <linux/siphash.h>

#include <linux/sunrpc/stats.h>
#include <linux/sunrpc/svcsock.h>
#include <linux/sunrpc/svc_xprt.h>
#include <linux/lockd/bind.h>
#include <linux/nfsacl.h>
#include <linux/seq_file.h>
#include <linux/inetdevice.h>
#include <net/addrconf.h>
#include <net/ipv6.h>
#include <net/net_namespace.h>
#include "nfsd.h"
#include "cache.h"
#include "vfs.h"
#include "netns.h"
#include "filecache.h"

#include "trace.h"

#define NFSDDBG_FACILITY	NFSDDBG_SVC

extern struct svc_program	nfsd_program;
static int			nfsd(void *vrqstp);
#if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL)
static int			nfsd_acl_rpcbind_set(struct net *,
						     const struct svc_program *,
						     u32, int,
						     unsigned short,
						     unsigned short);
static __be32			nfsd_acl_init_request(struct svc_rqst *,
						const struct svc_program *,
						struct svc_process_info *);
#endif
static int			nfsd_rpcbind_set(struct net *,
						 const struct svc_program *,
						 u32, int,
						 unsigned short,
						 unsigned short);
static __be32			nfsd_init_request(struct svc_rqst *,
						const struct svc_program *,
						struct svc_process_info *);

/*
 * nfsd_mutex protects nn->nfsd_serv -- both the pointer itself and some members
 * of the svc_serv struct such as ->sv_temp_socks and ->sv_permsocks.
 *
 * If (outside the lock) nn->nfsd_serv is non-NULL, then it must point to a
 * properly initialised 'struct svc_serv' with ->sv_nrthreads > 0 (unless
 * nn->keep_active is set).  That number of nfsd threads must
 * exist and each must be listed in ->sp_all_threads in some entry of
 * ->sv_pools[].
 *
 * Each active thread holds a counted reference on nn->nfsd_serv, as does
 * the nn->keep_active flag and various transient calls to svc_get().
 *
 * Finally, the nfsd_mutex also protects some of the global variables that are
 * accessed when nfsd starts and that are settable via the write_* routines in
 * nfsctl.c. In particular:
 *
 *	user_recovery_dirname
 *	user_lease_time
 *	nfsd_versions
 */
DEFINE_MUTEX(nfsd_mutex);

/*
 * nfsd_drc_lock protects nfsd_drc_max_mem and nfsd_drc_mem_used.
 * nfsd_drc_max_mem limits the total amount of memory available for
 * version 4.1 DRC caches.
 * nfsd_drc_mem_used tracks the current version 4.1 DRC memory usage.
 */
DEFINE_SPINLOCK(nfsd_drc_lock);
unsigned long	nfsd_drc_max_mem;
unsigned long	nfsd_drc_mem_used;

#if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL)
static struct svc_stat	nfsd_acl_svcstats;
static const struct svc_version *nfsd_acl_version[] = {
	[2] = &nfsd_acl_version2,
	[3] = &nfsd_acl_version3,
};

#define NFSD_ACL_MINVERS	2
#define NFSD_ACL_NRVERS		ARRAY_SIZE(nfsd_acl_version)

static struct svc_program	nfsd_acl_program = {
	.pg_prog		= NFS_ACL_PROGRAM,
	.pg_nvers		= NFSD_ACL_NRVERS,
	.pg_vers		= nfsd_acl_version,
	.pg_name		= "nfsacl",
	.pg_class		= "nfsd",
	.pg_stats		= &nfsd_acl_svcstats,
	.pg_authenticate	= &svc_set_client,
	.pg_init_request	= nfsd_acl_init_request,
	.pg_rpcbind_set		= nfsd_acl_rpcbind_set,
};

static struct svc_stat	nfsd_acl_svcstats = {
	.program	= &nfsd_acl_program,
};
#endif /* defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL) */

static const struct svc_version *nfsd_version[] = {
	[2] = &nfsd_version2,
#if defined(CONFIG_NFSD_V3)
	[3] = &nfsd_version3,
#endif
#if defined(CONFIG_NFSD_V4)
	[4] = &nfsd_version4,
#endif
};

#define NFSD_MINVERS		2
#define NFSD_NRVERS		ARRAY_SIZE(nfsd_version)

struct svc_program		nfsd_program = {
#if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL)
	.pg_next		= &nfsd_acl_program,
#endif
	.pg_prog		= NFS_PROGRAM,		/* program number */
	.pg_nvers		= NFSD_NRVERS,		/* nr of entries in nfsd_version */
	.pg_vers		= nfsd_version,		/* version table */
	.pg_name		= "nfsd",		/* program name */
	.pg_class		= "nfsd",		/* authentication class */
	.pg_stats		= &nfsd_svcstats,	/* server statistics */
	.pg_authenticate	= &svc_set_client,	/* export authentication */
	.pg_init_request	= nfsd_init_request,
	.pg_rpcbind_set		= nfsd_rpcbind_set,
};

static bool
nfsd_support_version(int vers)
{
	if (vers >= NFSD_MINVERS && vers < NFSD_NRVERS)
		return nfsd_version[vers] != NULL;
	return false;
}

static bool *
nfsd_alloc_versions(void)
{
	bool *vers = kmalloc_array(NFSD_NRVERS, sizeof(bool), GFP_KERNEL);
	unsigned i;

	if (vers) {
		/* All compiled versions are enabled by default */
		for (i = 0; i < NFSD_NRVERS; i++)
			vers[i] = nfsd_support_version(i);
	}
	return vers;
}

static bool *
nfsd_alloc_minorversions(void)
{
	bool *vers = kmalloc_array(NFSD_SUPPORTED_MINOR_VERSION + 1,
			sizeof(bool), GFP_KERNEL);
	unsigned i;

	if (vers) {
		/* All minor versions are enabled by default */
		for (i = 0; i <= NFSD_SUPPORTED_MINOR_VERSION; i++)
			vers[i] = nfsd_support_version(4);
	}
	return vers;
}

void
nfsd_netns_free_versions(struct nfsd_net *nn)
{
	kfree(nn->nfsd_versions);
	kfree(nn->nfsd4_minorversions);
	nn->nfsd_versions = NULL;
	nn->nfsd4_minorversions = NULL;
}

static void
nfsd_netns_init_versions(struct nfsd_net *nn)
{
	if (!nn->nfsd_versions) {
		nn->nfsd_versions = nfsd_alloc_versions();
		nn->nfsd4_minorversions = nfsd_alloc_minorversions();
		if (!nn->nfsd_versions || !nn->nfsd4_minorversions)
			nfsd_netns_free_versions(nn);
	}
}

int nfsd_vers(struct nfsd_net *nn, int vers, enum vers_op change)
{
	if (vers < NFSD_MINVERS || vers >= NFSD_NRVERS)
		return 0;
	switch(change) {
	case NFSD_SET:
		if (nn->nfsd_versions)
			nn->nfsd_versions[vers] = nfsd_support_version(vers);
		break;
	case NFSD_CLEAR:
		nfsd_netns_init_versions(nn);
		if (nn->nfsd_versions)
			nn->nfsd_versions[vers] = false;
		break;
	case NFSD_TEST:
		if (nn->nfsd_versions)
			return nn->nfsd_versions[vers];
		fallthrough;
	case NFSD_AVAIL:
		return nfsd_support_version(vers);
	}
	return 0;
}

static void
nfsd_adjust_nfsd_versions4(struct nfsd_net *nn)
{
	unsigned i;

	for (i = 0; i <= NFSD_SUPPORTED_MINOR_VERSION; i++) {
		if (nn->nfsd4_minorversions[i])
			return;
	}
	nfsd_vers(nn, 4, NFSD_CLEAR);
}

int nfsd_minorversion(struct nfsd_net *nn, u32 minorversion, enum vers_op change)
{
	if (minorversion > NFSD_SUPPORTED_MINOR_VERSION &&
	    change != NFSD_AVAIL)
		return -1;

	switch(change) {
	case NFSD_SET:
		if (nn->nfsd4_minorversions) {
			nfsd_vers(nn, 4, NFSD_SET);
			nn->nfsd4_minorversions[minorversion] =
				nfsd_vers(nn, 4, NFSD_TEST);
		}
		break;
	case NFSD_CLEAR:
		nfsd_netns_init_versions(nn);
		if (nn->nfsd4_minorversions) {
			nn->nfsd4_minorversions[minorversion] = false;
			nfsd_adjust_nfsd_versions4(nn);
		}
		break;
	case NFSD_TEST:
		if (nn->nfsd4_minorversions)
			return nn->nfsd4_minorversions[minorversion];
		return nfsd_vers(nn, 4, NFSD_TEST);
	case NFSD_AVAIL:
		return minorversion <= NFSD_SUPPORTED_MINOR_VERSION &&
			nfsd_vers(nn, 4, NFSD_AVAIL);
	}
	return 0;
}

/*
 * Maximum number of nfsd processes
 */
#define	NFSD_MAXSERVS		8192

int nfsd_nrthreads(struct net *net)
{
	int rv = 0;
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	mutex_lock(&nfsd_mutex);
	if (nn->nfsd_serv)
		rv = nn->nfsd_serv->sv_nrthreads;
	mutex_unlock(&nfsd_mutex);
	return rv;
}

static int nfsd_init_socks(struct net *net, const struct cred *cred)
{
	int error;
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	if (!list_empty(&nn->nfsd_serv->sv_permsocks))
		return 0;

	error = svc_create_xprt(nn->nfsd_serv, "udp", net, PF_INET, NFS_PORT,
					SVC_SOCK_DEFAULTS, cred);
	if (error < 0)
		return error;

	error = svc_create_xprt(nn->nfsd_serv, "tcp", net, PF_INET, NFS_PORT,
					SVC_SOCK_DEFAULTS, cred);
	if (error < 0)
		return error;

	return 0;
}

static int nfsd_users = 0;

static int nfsd_startup_generic(void)
{
	int ret;

	if (nfsd_users++)
		return 0;

	ret = nfsd_file_cache_init();
	if (ret)
		goto dec_users;

	ret = nfs4_state_start();
	if (ret)
		goto out_file_cache;
	return 0;

out_file_cache:
	nfsd_file_cache_shutdown();
dec_users:
	nfsd_users--;
	return ret;
}

static void nfsd_shutdown_generic(void)
{
	if (--nfsd_users)
		return;

	nfs4_state_shutdown();
	nfsd_file_cache_shutdown();
}

static bool nfsd_needs_lockd(struct nfsd_net *nn)
{
	return nfsd_vers(nn, 2, NFSD_TEST) || nfsd_vers(nn, 3, NFSD_TEST);
}

/**
 * nfsd_copy_write_verifier - Atomically copy a write verifier
 * @verf: buffer in which to receive the verifier cookie
 * @nn: NFS net namespace
 *
 * This function provides a wait-free mechanism for copying the
 * namespace's write verifier without tearing it.
 */
void nfsd_copy_write_verifier(__be32 verf[2], struct nfsd_net *nn)
{
	int seq = 0;

	do {
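		/*
		 * Lockless read of the verifier; retry if a writer
		 * updated it while we were copying.
		 */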
		read_seqbegin_or_lock(&nn->writeverf_lock, &seq);
		memcpy(verf, nn->writeverf, sizeof(*verf));
	} while (need_seqretry(&nn->writeverf_lock, seq));
	done_seqretry(&nn->writeverf_lock, seq);
}

static void nfsd_reset_write_verifier_locked(struct nfsd_net *nn)
{
	struct timespec64 now;
	u64 verf;

	/*
	 * Because the time value is hashed, y2038 time_t overflow
	 * is irrelevant in this usage.
	 */
	ktime_get_raw_ts64(&now);
	verf = siphash_2u64(now.tv_sec, now.tv_nsec, &nn->siphash_key);
	memcpy(nn->writeverf, &verf, sizeof(nn->writeverf));
}

/**
 * nfsd_reset_write_verifier - Generate a new write verifier
 * @nn: NFS net namespace
 *
 * This function updates the ->writeverf field of @nn. This field
 * contains an opaque cookie that, according to Section 18.32.3 of
 * RFC 8881, "the client can use to determine whether a server has
 * changed instance state (e.g., server restart) between a call to
 * WRITE and a subsequent call to either WRITE or COMMIT.  This
 * cookie MUST be unchanged during a single instance of the NFSv4.1
 * server and MUST be unique between instances of the NFSv4.1
 * server."
 */
void nfsd_reset_write_verifier(struct nfsd_net *nn)
{
	write_seqlock(&nn->writeverf_lock);
	nfsd_reset_write_verifier_locked(nn);
	write_sequnlock(&nn->writeverf_lock);
}

static int nfsd_startup_net(struct net *net, const struct cred *cred)
{
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
	int ret;

	if (nn->nfsd_net_up)
		return 0;

	ret = nfsd_startup_generic();
	if (ret)
		return ret;
	ret = nfsd_init_socks(net, cred);
	if (ret)
		goto out_socks;

	if (nfsd_needs_lockd(nn) && !nn->lockd_up) {
		ret = lockd_up(net, cred);
		if (ret)
			goto out_socks;
		nn->lockd_up = true;
	}

	ret = nfsd_file_cache_start_net(net);
	if (ret)
		goto out_lockd;
	ret = nfs4_state_start_net(net);
	if (ret)
		goto out_filecache;

#ifdef CONFIG_NFSD_V4_2_INTER_SSC
	nfsd4_ssc_init_umount_work(nn);
#endif
	nn->nfsd_net_up = true;
	return 0;

out_filecache:
	nfsd_file_cache_shutdown_net(net);
out_lockd:
	if (nn->lockd_up) {
		lockd_down(net);
		nn->lockd_up = false;
	}
out_socks:
	nfsd_shutdown_generic();
	return ret;
}

static void nfsd_shutdown_net(struct net *net)
{
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	nfsd_file_cache_shutdown_net(net);
	nfs4_state_shutdown_net(net);
	if (nn->lockd_up) {
		lockd_down(net);
		nn->lockd_up = false;
	}
	nn->nfsd_net_up = false;
	nfsd_shutdown_generic();
}

static DEFINE_SPINLOCK(nfsd_notifier_lock);
static int nfsd_inetaddr_event(struct notifier_block *this, unsigned long event,
	void *ptr)
{
	struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
	struct net_device *dev = ifa->ifa_dev->dev;
	struct net *net = dev_net(dev);
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
	struct sockaddr_in sin;

	if (event != NETDEV_DOWN || !nn->nfsd_serv)
		goto out;

	spin_lock(&nfsd_notifier_lock);
	if (nn->nfsd_serv) {
		dprintk("nfsd_inetaddr_event: removed %pI4\n", &ifa->ifa_local);
		sin.sin_family = AF_INET;
		sin.sin_addr.s_addr = ifa->ifa_local;
		svc_age_temp_xprts_now(nn->nfsd_serv, (struct sockaddr *)&sin);
	}
	spin_unlock(&nfsd_notifier_lock);

out:
	return NOTIFY_DONE;
}

static struct notifier_block nfsd_inetaddr_notifier = {
	.notifier_call = nfsd_inetaddr_event,
};

#if IS_ENABLED(CONFIG_IPV6)
static int nfsd_inet6addr_event(struct notifier_block *this,
	unsigned long event, void *ptr)
{
	struct inet6_ifaddr *ifa = (struct inet6_ifaddr *)ptr;
	struct net_device *dev = ifa->idev->dev;
	struct net *net = dev_net(dev);
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
	struct sockaddr_in6 sin6;

	if (event != NETDEV_DOWN || !nn->nfsd_serv)
		goto out;

	spin_lock(&nfsd_notifier_lock);
	if (nn->nfsd_serv) {
		dprintk("nfsd_inet6addr_event: removed %pI6\n", &ifa->addr);
		sin6.sin6_family = AF_INET6;
		sin6.sin6_addr = ifa->addr;
		if (ipv6_addr_type(&sin6.sin6_addr) & IPV6_ADDR_LINKLOCAL)
			sin6.sin6_scope_id = ifa->idev->dev->ifindex;
		svc_age_temp_xprts_now(nn->nfsd_serv, (struct sockaddr *)&sin6);
	}
	spin_unlock(&nfsd_notifier_lock);

out:
	return NOTIFY_DONE;
}

static struct notifier_block nfsd_inet6addr_notifier = {
	.notifier_call = nfsd_inet6addr_event,
};
#endif

/* Only used under nfsd_mutex, so this atomic may be overkill: */
static atomic_t nfsd_notifier_refcount = ATOMIC_INIT(0);

static void nfsd_last_thread(struct svc_serv *serv, struct net *net)
{
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	/* check if the notifier still has clients */
	if (atomic_dec_return(&nfsd_notifier_refcount) == 0) {
		unregister_inetaddr_notifier(&nfsd_inetaddr_notifier);
#if IS_ENABLED(CONFIG_IPV6)
		unregister_inet6addr_notifier(&nfsd_inet6addr_notifier);
#endif
	}

	/*
	 * write_ports can create the server without actually starting
	 * any threads--if we get shut down before any threads are
	 * started, then nfsd_last_thread will be run before any of this
	 * other initialization has been done except the rpcb information.
	 */
	svc_rpcb_cleanup(serv, net);
	if (!nn->nfsd_net_up)
		return;

	nfsd_shutdown_net(net);
	pr_info("nfsd: last server has exited, flushing export cache\n");
	nfsd_export_flush(net);
}

void nfsd_reset_versions(struct nfsd_net *nn)
{
	int i;

	for (i = 0; i < NFSD_NRVERS; i++)
		if (nfsd_vers(nn, i, NFSD_TEST))
			return;

	for (i = 0; i < NFSD_NRVERS; i++)
		if (i != 4)
			nfsd_vers(nn, i, NFSD_SET);
		else {
			int minor = 0;
			while (nfsd_minorversion(nn, minor, NFSD_SET) >= 0)
				minor++;
		}
}

/*
 * Each session guarantees a negotiated per slot memory cache for replies
 * which in turn consumes memory beyond the v2/v3/v4.0 server. A dedicated
 * NFSv4.1 server might want to use more memory for a DRC than a machine
 * with multiple services.
 *
 * Impose a hard limit on the number of pages for the DRC which varies
 * according to the machine's free pages. This is of course only a default.
 *
 * For now this is a #defined shift which could be under admin control
 * in the future.
 */
static void set_max_drc(void)
{
	#define NFSD_DRC_SIZE_SHIFT	7
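	/*
	 * Default the v4.1 DRC ceiling to 1/(2^NFSD_DRC_SIZE_SHIFT),
	 * i.e. 1/128, of the memory currently free for buffers.
	 */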
	nfsd_drc_max_mem = (nr_free_buffer_pages()
					>> NFSD_DRC_SIZE_SHIFT) * PAGE_SIZE;
	nfsd_drc_mem_used = 0;
	dprintk("%s nfsd_drc_max_mem %lu \n", __func__, nfsd_drc_max_mem);
}

static int nfsd_get_default_max_blksize(void)
{
	struct sysinfo i;
	unsigned long long target;
	unsigned long ret;

	si_meminfo(&i);
	target = (i.totalram - i.totalhigh) << PAGE_SHIFT;
	/*
	 * Aim for 1/4096 of memory per thread. This gives 1MB on 4Gig
	 * machines, but only uses 32K on 128M machines.  Bottom out at
	 * 8K on 32M and smaller.  Of course, this is only a default.
	 */
	target >>= 12;

	ret = NFSSVC_MAXBLKSIZE;
	while (ret > target && ret >= 8*1024*2)
		ret /= 2;
	return ret;
}

static const struct svc_serv_ops nfsd_thread_sv_ops = {
	.svo_shutdown		= nfsd_last_thread,
	.svo_function		= nfsd,
	.svo_enqueue_xprt	= svc_xprt_do_enqueue,
	.svo_module		= THIS_MODULE,
};

void nfsd_shutdown_threads(struct net *net)
{
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
	struct svc_serv *serv;

	mutex_lock(&nfsd_mutex);
	serv = nn->nfsd_serv;
	if (serv == NULL) {
		mutex_unlock(&nfsd_mutex);
		return;
	}

	svc_get(serv);
	/* Kill outstanding nfsd threads */
	svc_set_num_threads(serv, NULL, 0);
	nfsd_put(net);
	mutex_unlock(&nfsd_mutex);
}

bool i_am_nfsd(void)
{
	return kthread_func(current) == nfsd;
}

int nfsd_create_serv(struct net *net)
{
	int error;
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
	struct svc_serv *serv;

	WARN_ON(!mutex_is_locked(&nfsd_mutex));
	if (nn->nfsd_serv) {
		svc_get(nn->nfsd_serv);
		return 0;
	}
	if (nfsd_max_blksize == 0)
		nfsd_max_blksize = nfsd_get_default_max_blksize();
	nfsd_reset_versions(nn);
	serv = svc_create_pooled(&nfsd_program, nfsd_max_blksize,
				 &nfsd_thread_sv_ops);
	if (serv == NULL)
		return -ENOMEM;

	serv->sv_maxconn = nn->max_connections;
	error = svc_bind(serv, net);
	if (error < 0) {
		/* NOT nfsd_put() as notifiers (see below) haven't
		 * been set up yet.
		 */
		svc_put(serv);
		return error;
	}
	spin_lock(&nfsd_notifier_lock);
	nn->nfsd_serv = serv;
	spin_unlock(&nfsd_notifier_lock);

	set_max_drc();
	/* check if the notifier is already set */
	if (atomic_inc_return(&nfsd_notifier_refcount) == 1) {
		register_inetaddr_notifier(&nfsd_inetaddr_notifier);
#if IS_ENABLED(CONFIG_IPV6)
		register_inet6addr_notifier(&nfsd_inet6addr_notifier);
#endif
	}
	nfsd_reset_write_verifier(nn);
	return 0;
}

int nfsd_nrpools(struct net *net)
{
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	if (nn->nfsd_serv == NULL)
		return 0;
	else
		return nn->nfsd_serv->sv_nrpools;
}

int nfsd_get_nrthreads(int n, int *nthreads, struct net *net)
{
	int i = 0;
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	if (nn->nfsd_serv != NULL) {
		for (i = 0; i < nn->nfsd_serv->sv_nrpools && i < n; i++)
			nthreads[i] = nn->nfsd_serv->sv_pools[i].sp_nrthreads;
	}

	return 0;
}

/* This is the callback for kref_put() below.
 * There is no code here as the first thing to be done is
 * call svc_shutdown_net(), but we cannot get the 'net' from
 * the kref.  So do all the work when kref_put returns true.
 */
static void nfsd_noop(struct kref *ref)
{
}

void nfsd_put(struct net *net)
{
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	if (kref_put(&nn->nfsd_serv->sv_refcnt, nfsd_noop)) {
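		/*
		 * That was the last reference: tear down the transports,
		 * destroy the svc_serv, and clear nn->nfsd_serv under
		 * nfsd_notifier_lock so the address notifiers never see
		 * a stale pointer.
		 */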
		svc_shutdown_net(nn->nfsd_serv, net);
		svc_destroy(&nn->nfsd_serv->sv_refcnt);
		spin_lock(&nfsd_notifier_lock);
		nn->nfsd_serv = NULL;
		spin_unlock(&nfsd_notifier_lock);
	}
}

int nfsd_set_nrthreads(int n, int *nthreads, struct net *net)
{
	int i = 0;
	int tot = 0;
	int err = 0;
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	WARN_ON(!mutex_is_locked(&nfsd_mutex));

	if (nn->nfsd_serv == NULL || n <= 0)
		return 0;

	if (n > nn->nfsd_serv->sv_nrpools)
		n = nn->nfsd_serv->sv_nrpools;

	/* enforce a global maximum number of threads */
	tot = 0;
	for (i = 0; i < n; i++) {
		nthreads[i] = min(nthreads[i], NFSD_MAXSERVS);
		tot += nthreads[i];
	}
	if (tot > NFSD_MAXSERVS) {
		/* total too large: scale down requested numbers */
		for (i = 0; i < n && tot > 0; i++) {
			int new = nthreads[i] * NFSD_MAXSERVS / tot;
			tot -= (nthreads[i] - new);
			nthreads[i] = new;
		}
		for (i = 0; i < n && tot > 0; i++) {
			nthreads[i]--;
			tot--;
		}
	}

	/*
	 * There must always be a thread in pool 0; the admin
	 * can't shut down NFS completely using pool_threads.
	 */
	if (nthreads[0] == 0)
		nthreads[0] = 1;

	/* apply the new numbers */
	svc_get(nn->nfsd_serv);
	for (i = 0; i < n; i++) {
		err = svc_set_num_threads(nn->nfsd_serv,
					  &nn->nfsd_serv->sv_pools[i],
					  nthreads[i]);
		if (err)
			break;
	}
	nfsd_put(net);
	return err;
}

/*
 * Adjust the number of threads and return the new number of threads.
 * This is also the function that starts the server if necessary, if
 * this is the first time nrservs is nonzero.
 */
int
nfsd_svc(int nrservs, struct net *net, const struct cred *cred)
{
	int	error;
	bool	nfsd_up_before;
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	mutex_lock(&nfsd_mutex);
	dprintk("nfsd: creating service\n");

	nrservs = max(nrservs, 0);
	nrservs = min(nrservs, NFSD_MAXSERVS);
	error = 0;

	if (nrservs == 0 && nn->nfsd_serv == NULL)
		goto out;

	strlcpy(nn->nfsd_name, utsname()->nodename,
		sizeof(nn->nfsd_name));

	error = nfsd_create_serv(net);
	if (error)
		goto out;

	nfsd_up_before = nn->nfsd_net_up;

	error = nfsd_startup_net(net, cred);
	if (error)
		goto out_put;
	error = svc_set_num_threads(nn->nfsd_serv, NULL, nrservs);
	if (error)
		goto out_shutdown;
	error = nn->nfsd_serv->sv_nrthreads;
out_shutdown:
	if (error < 0 && !nfsd_up_before)
		nfsd_shutdown_net(net);
out_put:
	/* Threads now hold service active */
	if (xchg(&nn->keep_active, 0))
		nfsd_put(net);
	nfsd_put(net);
out:
	mutex_unlock(&nfsd_mutex);
	return error;
}

#if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL)
static bool
nfsd_support_acl_version(int vers)
{
	if (vers >= NFSD_ACL_MINVERS && vers < NFSD_ACL_NRVERS)
		return nfsd_acl_version[vers] != NULL;
	return false;
}

static int
nfsd_acl_rpcbind_set(struct net *net, const struct svc_program *progp,
		     u32 version, int family, unsigned short proto,
		     unsigned short port)
{
	if (!nfsd_support_acl_version(version) ||
	    !nfsd_vers(net_generic(net, nfsd_net_id), version, NFSD_TEST))
		return 0;
	return svc_generic_rpcbind_set(net, progp, version, family,
			proto, port);
}

static __be32
nfsd_acl_init_request(struct svc_rqst *rqstp,
		      const struct svc_program *progp,
		      struct svc_process_info *ret)
{
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
	int i;

	if (likely(nfsd_support_acl_version(rqstp->rq_vers) &&
	    nfsd_vers(nn, rqstp->rq_vers, NFSD_TEST)))
		return svc_generic_init_request(rqstp, progp, ret);

	ret->mismatch.lovers = NFSD_ACL_NRVERS;
	for (i = NFSD_ACL_MINVERS; i < NFSD_ACL_NRVERS; i++) {
		if (nfsd_support_acl_version(rqstp->rq_vers) &&
		    nfsd_vers(nn, i, NFSD_TEST)) {
			ret->mismatch.lovers = i;
			break;
		}
	}
	if (ret->mismatch.lovers == NFSD_ACL_NRVERS)
		return rpc_prog_unavail;
	ret->mismatch.hivers = NFSD_ACL_MINVERS;
	for (i = NFSD_ACL_NRVERS - 1; i >= NFSD_ACL_MINVERS; i--) {
		if (nfsd_support_acl_version(rqstp->rq_vers) &&
		    nfsd_vers(nn, i, NFSD_TEST)) {
			ret->mismatch.hivers = i;
			break;
		}
	}
	return rpc_prog_mismatch;
}
#endif

static int
nfsd_rpcbind_set(struct net *net, const struct svc_program *progp,
		 u32 version, int family, unsigned short proto,
		 unsigned short port)
{
	if (!nfsd_vers(net_generic(net, nfsd_net_id), version, NFSD_TEST))
		return 0;
	return svc_generic_rpcbind_set(net, progp, version, family,
			proto, port);
}

static __be32
nfsd_init_request(struct svc_rqst *rqstp,
		  const struct svc_program *progp,
		  struct svc_process_info *ret)
{
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
	int i;

	if (likely(nfsd_vers(nn, rqstp->rq_vers, NFSD_TEST)))
		return svc_generic_init_request(rqstp, progp, ret);

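	/*
	 * The requested version is not enabled, so report the lowest
	 * and highest versions that are, giving the client a useful
	 * PROG_MISMATCH range.
	 */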
	ret->mismatch.lovers = NFSD_NRVERS;
	for (i = NFSD_MINVERS; i < NFSD_NRVERS; i++) {
		if (nfsd_vers(nn, i, NFSD_TEST)) {
			ret->mismatch.lovers = i;
			break;
		}
	}
	if (ret->mismatch.lovers == NFSD_NRVERS)
		return rpc_prog_unavail;
	ret->mismatch.hivers = NFSD_MINVERS;
	for (i = NFSD_NRVERS - 1; i >= NFSD_MINVERS; i--) {
		if (nfsd_vers(nn, i, NFSD_TEST)) {
			ret->mismatch.hivers = i;
			break;
		}
	}
	return rpc_prog_mismatch;
}

/*
 * This is the NFS server kernel thread
 */
static int
nfsd(void *vrqstp)
{
	struct svc_rqst *rqstp = (struct svc_rqst *) vrqstp;
	struct svc_xprt *perm_sock = list_entry(rqstp->rq_server->sv_permsocks.next, typeof(struct svc_xprt), xpt_list);
	struct net *net = perm_sock->xpt_net;
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
	int err;

	/* At this point, the thread shares current->fs
	 * with the init process. We need to create files with the
	 * umask as defined by the client instead of init's umask. */
	if (unshare_fs_struct() < 0) {
		printk("Unable to start nfsd thread: out of memory\n");
		goto out;
	}

	current->fs->umask = 0;

	/*
	 * thread is spawned with all signals set to SIG_IGN, re-enable
	 * the ones that will bring down the thread
	 */
	allow_signal(SIGKILL);
	allow_signal(SIGHUP);
	allow_signal(SIGINT);
	allow_signal(SIGQUIT);

	atomic_inc(&nfsdstats.th_cnt);

	set_freezable();

	/*
	 * The main request loop
	 */
	for (;;) {
		/* Update sv_maxconn if it has changed */
		rqstp->rq_server->sv_maxconn = nn->max_connections;

		/*
		 * Find a socket with data available and call its
		 * recvfrom routine.
		 */
		while ((err = svc_recv(rqstp, 60*60*HZ)) == -EAGAIN)
			;
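		/*
		 * -EAGAIN simply means there was nothing to do (for
		 * example the hour-long wait timed out); -EINTR means
		 * a signal is asking this thread to exit.
		 */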
		if (err == -EINTR)
			break;
		validate_process_creds();
		svc_process(rqstp);
		validate_process_creds();
	}

	/* Clear signals before calling svc_exit_thread() */
	flush_signals(current);

	atomic_dec(&nfsdstats.th_cnt);

out:
	/* Take an extra ref so that the svc_put in svc_exit_thread()
	 * doesn't call svc_destroy()
	 */
	svc_get(nn->nfsd_serv);

	/* Release the thread */
	svc_exit_thread(rqstp);

	/* We need to drop a ref, but may not drop the last reference
	 * without holding nfsd_mutex, and we cannot wait for nfsd_mutex as that
	 * could deadlock with nfsd_shutdown_threads() waiting for us.
	 * So the three options are:
	 *  - drop a non-final reference,
	 *  - get the mutex without waiting,
	 *  - sleep briefly and try the above again.
	 */
	while (!svc_put_not_last(nn->nfsd_serv)) {
		if (mutex_trylock(&nfsd_mutex)) {
			nfsd_put(net);
			mutex_unlock(&nfsd_mutex);
			break;
		}
		msleep(20);
	}

	/* Release module */
	module_put_and_kthread_exit(0);
	return 0;
}

/**
 * nfsd_dispatch - Process an NFS or NFSACL Request
 * @rqstp: incoming request
 * @statp: pointer to location of accept_stat field in RPC Reply buffer
 *
 * This RPC dispatcher integrates the NFS server's duplicate reply cache.
 *
 * Return values:
 *  %0: Processing complete; do not send a Reply
 *  %1: Processing complete; send Reply in rqstp->rq_res
 */
int nfsd_dispatch(struct svc_rqst *rqstp, __be32 *statp)
{
	const struct svc_procedure *proc = rqstp->rq_procinfo;

	/*
	 * Give the xdr decoder a chance to change this if it wants
	 * (necessary in the NFSv4.0 compound case)
	 */
	rqstp->rq_cachetype = proc->pc_cachetype;

	svcxdr_init_decode(rqstp);
	if (!proc->pc_decode(rqstp, &rqstp->rq_arg_stream))
		goto out_decode_err;

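	/*
	 * Consult the duplicate reply cache: RC_REPLY means a cached
	 * reply has already been placed in rq_res, RC_DROPIT means the
	 * request should be dropped without a reply.
	 */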
	switch (nfsd_cache_lookup(rqstp)) {
	case RC_DOIT:
		break;
	case RC_REPLY:
		goto out_cached_reply;
	case RC_DROPIT:
		goto out_dropit;
	}

	/*
	 * Need to grab the location to store the status, as
	 * NFSv4 does some encoding while processing
	 */
	svcxdr_init_encode(rqstp);

	*statp = proc->pc_func(rqstp);
	if (*statp == rpc_drop_reply || test_bit(RQ_DROPME, &rqstp->rq_flags))
		goto out_update_drop;

	if (!proc->pc_encode(rqstp, &rqstp->rq_res_stream))
		goto out_encode_err;

	nfsd_cache_update(rqstp, rqstp->rq_cachetype, statp + 1);
out_cached_reply:
	return 1;

out_decode_err:
	trace_nfsd_garbage_args_err(rqstp);
	*statp = rpc_garbage_args;
	return 1;

out_update_drop:
	nfsd_cache_update(rqstp, RC_NOCACHE, NULL);
out_dropit:
	return 0;

out_encode_err:
	trace_nfsd_cant_encode_err(rqstp);
	nfsd_cache_update(rqstp, RC_NOCACHE, NULL);
	*statp = rpc_system_err;
	return 1;
}

/**
 * nfssvc_decode_voidarg - Decode void arguments
 * @rqstp: Server RPC transaction context
 * @xdr: XDR stream positioned at arguments to decode
 *
 * Return values:
 *   %false: Arguments were not valid
 *   %true: Decoding was successful
 */
bool nfssvc_decode_voidarg(struct svc_rqst *rqstp, struct xdr_stream *xdr)
{
	return true;
}

/**
 * nfssvc_encode_voidres - Encode void results
 * @rqstp: Server RPC transaction context
 * @xdr: XDR stream into which to encode results
 *
 * Return values:
 *   %false: Local error while encoding
 *   %true: Encoding was successful
 */
bool nfssvc_encode_voidres(struct svc_rqst *rqstp, struct xdr_stream *xdr)
{
	return true;
}

int nfsd_pool_stats_open(struct inode *inode, struct file *file)
{
	int ret;
	struct nfsd_net *nn = net_generic(inode->i_sb->s_fs_info, nfsd_net_id);

	mutex_lock(&nfsd_mutex);
	if (nn->nfsd_serv == NULL) {
		mutex_unlock(&nfsd_mutex);
		return -ENODEV;
	}
	svc_get(nn->nfsd_serv);
	ret = svc_pool_stats_open(nn->nfsd_serv, file);
	mutex_unlock(&nfsd_mutex);
	return ret;
}

int nfsd_pool_stats_release(struct inode *inode, struct file *file)
{
	int ret = seq_release(inode, file);
	struct net *net = inode->i_sb->s_fs_info;

	mutex_lock(&nfsd_mutex);
	nfsd_put(net);
	mutex_unlock(&nfsd_mutex);
	return ret;
}