/*
 * Open file cache.
 *
 * (c) 2015 - Jeff Layton <jeff.layton@primarydata.com>
 */

#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/sched.h>
#include <linux/list_lru.h>
#include <linux/fsnotify_backend.h>
#include <linux/fsnotify.h>
#include <linux/seq_file.h>

#include "vfs.h"
#include "nfsd.h"
#include "nfsfh.h"
#include "netns.h"
#include "filecache.h"
#include "trace.h"

#define NFSDDBG_FACILITY	NFSDDBG_FH

/* FIXME: dynamically size this for the machine somehow? */
#define NFSD_FILE_HASH_BITS	12
#define NFSD_FILE_HASH_SIZE	(1 << NFSD_FILE_HASH_BITS)
#define NFSD_LAUNDRETTE_DELAY	(2 * HZ)

#define NFSD_FILE_LRU_RESCAN	(0)
#define NFSD_FILE_SHUTDOWN	(1)
#define NFSD_FILE_LRU_THRESHOLD	(4096UL)
#define NFSD_FILE_LRU_LIMIT	(NFSD_FILE_LRU_THRESHOLD << 2)

/* We only care about NFSD_MAY_READ/WRITE for this cache */
#define NFSD_FILE_MAY_MASK	(NFSD_MAY_READ|NFSD_MAY_WRITE)

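/*
 * Hash bucket for the open file cache. nfb_lock protects the chain and the
 * counters; nfb_count is the current number of entries in the chain, and
 * nfb_maxcount tracks the high-water mark reported via the stats file.
 */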
struct nfsd_fcache_bucket {
	struct hlist_head	nfb_head;
	spinlock_t		nfb_lock;
	unsigned int		nfb_count;
	unsigned int		nfb_maxcount;
};

static DEFINE_PER_CPU(unsigned long, nfsd_file_cache_hits);

static struct kmem_cache		*nfsd_file_slab;
static struct kmem_cache		*nfsd_file_mark_slab;
static struct nfsd_fcache_bucket	*nfsd_file_hashtbl;
static struct list_lru			nfsd_file_lru;
static long				nfsd_file_lru_flags;
static struct fsnotify_group		*nfsd_file_fsnotify_group;
static atomic_long_t			nfsd_filecache_count;
static struct delayed_work		nfsd_filecache_laundrette;

enum nfsd_file_laundrette_ctl {
	NFSD_FILE_LAUNDRETTE_NOFLUSH = 0,
	NFSD_FILE_LAUNDRETTE_MAY_FLUSH
};

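/*
 * Schedule a laundrette run: scan immediately if the cache is over the LRU
 * threshold, otherwise after a short delay. With MAY_FLUSH, also wait for
 * the work to complete once the cache has grown past the hard limit.
 */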
static void
nfsd_file_schedule_laundrette(enum nfsd_file_laundrette_ctl ctl)
{
	long count = atomic_long_read(&nfsd_filecache_count);

	if (count == 0 || test_bit(NFSD_FILE_SHUTDOWN, &nfsd_file_lru_flags))
		return;

	/* Be more aggressive about scanning if over the threshold */
	if (count > NFSD_FILE_LRU_THRESHOLD)
		mod_delayed_work(system_wq, &nfsd_filecache_laundrette, 0);
	else
		schedule_delayed_work(&nfsd_filecache_laundrette, NFSD_LAUNDRETTE_DELAY);

	if (ctl == NFSD_FILE_LAUNDRETTE_NOFLUSH)
		return;

	/* ...and don't delay flushing if we're out of control */
	if (count >= NFSD_FILE_LRU_LIMIT)
		flush_delayed_work(&nfsd_filecache_laundrette);
}

static void
nfsd_file_slab_free(struct rcu_head *rcu)
{
	struct nfsd_file *nf = container_of(rcu, struct nfsd_file, nf_rcu);

	put_cred(nf->nf_cred);
	kmem_cache_free(nfsd_file_slab, nf);
}

static void
nfsd_file_mark_free(struct fsnotify_mark *mark)
{
	struct nfsd_file_mark *nfm = container_of(mark, struct nfsd_file_mark,
						  nfm_mark);

	kmem_cache_free(nfsd_file_mark_slab, nfm);
}

static struct nfsd_file_mark *
nfsd_file_mark_get(struct nfsd_file_mark *nfm)
{
	if (!atomic_inc_not_zero(&nfm->nfm_ref))
		return NULL;
	return nfm;
}

static void
nfsd_file_mark_put(struct nfsd_file_mark *nfm)
{
	if (atomic_dec_and_test(&nfm->nfm_ref)) {
		fsnotify_destroy_mark(&nfm->nfm_mark, nfsd_file_fsnotify_group);
		fsnotify_put_mark(&nfm->nfm_mark);
	}
}

static struct nfsd_file_mark *
nfsd_file_mark_find_or_create(struct nfsd_file *nf)
{
	int err;
	struct fsnotify_mark *mark;
	struct nfsd_file_mark *nfm = NULL, *new;
	struct inode *inode = nf->nf_inode;

	do {
		mutex_lock(&nfsd_file_fsnotify_group->mark_mutex);
		mark = fsnotify_find_mark(&inode->i_fsnotify_marks,
					  nfsd_file_fsnotify_group);
		if (mark) {
			nfm = nfsd_file_mark_get(container_of(mark,
							      struct nfsd_file_mark,
							      nfm_mark));
			mutex_unlock(&nfsd_file_fsnotify_group->mark_mutex);
			fsnotify_put_mark(mark);
			if (likely(nfm))
				break;
		} else
			mutex_unlock(&nfsd_file_fsnotify_group->mark_mutex);

		/* allocate a new nfm */
		new = kmem_cache_alloc(nfsd_file_mark_slab, GFP_KERNEL);
		if (!new)
			return NULL;
		fsnotify_init_mark(&new->nfm_mark, nfsd_file_fsnotify_group);
		new->nfm_mark.mask = FS_ATTRIB|FS_DELETE_SELF;
		atomic_set(&new->nfm_ref, 1);

		err = fsnotify_add_inode_mark(&new->nfm_mark, inode, 0);

		/*
		 * If the add was successful, then return the object.
		 * Otherwise, we need to put the reference we hold on the
		 * nfm_mark. The fsnotify code will take a reference and put
		 * it on failure, so we can't just free it directly. It's also
		 * not safe to call fsnotify_destroy_mark on it as the
		 * mark->group will be NULL. Thus, we can't let the nfm_ref
		 * counter drive the destruction at this point.
		 */
		if (likely(!err))
			nfm = new;
		else
			fsnotify_put_mark(&new->nfm_mark);
	} while (unlikely(err == -EEXIST));

	return nfm;
}

static struct nfsd_file *
nfsd_file_alloc(struct inode *inode, unsigned int may, unsigned int hashval,
		struct net *net)
{
	struct nfsd_file *nf;

	nf = kmem_cache_alloc(nfsd_file_slab, GFP_KERNEL);
	if (nf) {
		INIT_HLIST_NODE(&nf->nf_node);
		INIT_LIST_HEAD(&nf->nf_lru);
		nf->nf_file = NULL;
		nf->nf_cred = get_current_cred();
		nf->nf_net = net;
		nf->nf_flags = 0;
		nf->nf_inode = inode;
		nf->nf_hashval = hashval;
		atomic_set(&nf->nf_ref, 1);
		nf->nf_may = may & NFSD_FILE_MAY_MASK;
		if (may & NFSD_MAY_NOT_BREAK_LEASE) {
			if (may & NFSD_MAY_WRITE)
				__set_bit(NFSD_FILE_BREAK_WRITE, &nf->nf_flags);
			if (may & NFSD_MAY_READ)
				__set_bit(NFSD_FILE_BREAK_READ, &nf->nf_flags);
		}
		nf->nf_mark = NULL;
		trace_nfsd_file_alloc(nf);
	}
	return nf;
}

static bool
nfsd_file_free(struct nfsd_file *nf)
{
	bool flush = false;

	trace_nfsd_file_put_final(nf);
	if (nf->nf_mark)
		nfsd_file_mark_put(nf->nf_mark);
	if (nf->nf_file) {
		get_file(nf->nf_file);
		filp_close(nf->nf_file, NULL);
		fput(nf->nf_file);
		flush = true;
	}
	call_rcu(&nf->nf_rcu, nfsd_file_slab_free);
	return flush;
}

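/*
 * Returns true if the file still has dirty pages or pages under writeback,
 * in which case it isn't safe to throw it out of the cache yet.
 */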
static bool
nfsd_file_check_writeback(struct nfsd_file *nf)
{
	struct file *file = nf->nf_file;
	struct address_space *mapping;

	if (!file || !(file->f_mode & FMODE_WRITE))
		return false;
	mapping = file->f_mapping;
	return mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) ||
	       mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK);
}

static int
nfsd_file_check_write_error(struct nfsd_file *nf)
{
	struct file *file = nf->nf_file;

	if (!file || !(file->f_mode & FMODE_WRITE))
		return 0;
	return filemap_check_wb_err(file->f_mapping, READ_ONCE(file->f_wb_err));
}

static bool
nfsd_file_in_use(struct nfsd_file *nf)
{
	return nfsd_file_check_writeback(nf) ||
	       nfsd_file_check_write_error(nf);
}

static void
nfsd_file_do_unhash(struct nfsd_file *nf)
{
	lockdep_assert_held(&nfsd_file_hashtbl[nf->nf_hashval].nfb_lock);

	trace_nfsd_file_unhash(nf);

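	/*
	 * If this file has an unseen write error, reset the boot verifier
	 * so clients will notice and resend any uncommitted writes.
	 */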
	if (nfsd_file_check_write_error(nf))
		nfsd_reset_boot_verifier(net_generic(nf->nf_net, nfsd_net_id));
	--nfsd_file_hashtbl[nf->nf_hashval].nfb_count;
	hlist_del_rcu(&nf->nf_node);
	if (!list_empty(&nf->nf_lru))
		list_lru_del(&nfsd_file_lru, &nf->nf_lru);
	atomic_long_dec(&nfsd_filecache_count);
}

static bool
nfsd_file_unhash(struct nfsd_file *nf)
{
	if (test_and_clear_bit(NFSD_FILE_HASHED, &nf->nf_flags)) {
		nfsd_file_do_unhash(nf);
		return true;
	}
	return false;
}

/*
 * Return true if the file was unhashed.
 */
static bool
nfsd_file_unhash_and_release_locked(struct nfsd_file *nf, struct list_head *dispose)
{
	lockdep_assert_held(&nfsd_file_hashtbl[nf->nf_hashval].nfb_lock);

	trace_nfsd_file_unhash_and_release_locked(nf);
	if (!nfsd_file_unhash(nf))
		return false;
	/* keep final reference for nfsd_file_lru_dispose */
	if (atomic_add_unless(&nf->nf_ref, -1, 1))
		return true;

	list_add(&nf->nf_lru, dispose);
	return true;
}

static int
nfsd_file_put_noref(struct nfsd_file *nf)
{
	int count;

	trace_nfsd_file_put(nf);

	count = atomic_dec_return(&nf->nf_ref);
	if (!count) {
		WARN_ON(test_bit(NFSD_FILE_HASHED, &nf->nf_flags));
		nfsd_file_free(nf);
	}
	return count;
}

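/*
 * Drop a reference. If only the hashtable's reference remains and the file
 * is hashed and idle, kick the laundrette so it can be garbage collected.
 */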
void
nfsd_file_put(struct nfsd_file *nf)
{
	bool is_hashed = test_bit(NFSD_FILE_HASHED, &nf->nf_flags) != 0;
	bool unused = !nfsd_file_in_use(nf);

	set_bit(NFSD_FILE_REFERENCED, &nf->nf_flags);
	if (nfsd_file_put_noref(nf) == 1 && is_hashed && unused)
		nfsd_file_schedule_laundrette(NFSD_FILE_LAUNDRETTE_MAY_FLUSH);
}

struct nfsd_file *
nfsd_file_get(struct nfsd_file *nf)
{
	if (likely(atomic_inc_not_zero(&nf->nf_ref)))
		return nf;
	return NULL;
}

static void
nfsd_file_dispose_list(struct list_head *dispose)
{
	struct nfsd_file *nf;

	while (!list_empty(dispose)) {
		nf = list_first_entry(dispose, struct nfsd_file, nf_lru);
		list_del(&nf->nf_lru);
		nfsd_file_put_noref(nf);
	}
}

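/*
 * Like nfsd_file_dispose_list(), but drop the references directly and flush
 * any delayed fputs so the struct files are fully closed before returning.
 */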
static void
nfsd_file_dispose_list_sync(struct list_head *dispose)
{
	bool flush = false;
	struct nfsd_file *nf;

	while (!list_empty(dispose)) {
		nf = list_first_entry(dispose, struct nfsd_file, nf_lru);
		list_del(&nf->nf_lru);
		if (!atomic_dec_and_test(&nf->nf_ref))
			continue;
		if (nfsd_file_free(nf))
			flush = true;
	}
	if (flush)
		flush_delayed_fput();
}

/*
 * Note this can deadlock with nfsd_file_cache_purge.
 */
static enum lru_status
nfsd_file_lru_cb(struct list_head *item, struct list_lru_one *lru,
		 spinlock_t *lock, void *arg)
	__releases(lock)
	__acquires(lock)
{
	struct list_head *head = arg;
	struct nfsd_file *nf = list_entry(item, struct nfsd_file, nf_lru);

	/*
	 * Do a lockless refcount check. The hashtable holds one reference, so
	 * we look to see if anything else has a reference, or if any have
	 * been put since the shrinker last ran. Those don't get unhashed and
	 * released.
	 *
	 * Note that in the put path, we set the flag and then decrement the
	 * counter. Here we check the counter and then test and clear the flag.
	 * That order is deliberate to ensure that we can do this locklessly.
	 */
	if (atomic_read(&nf->nf_ref) > 1)
		goto out_skip;

	/*
	 * Don't throw out files that are still undergoing I/O or
	 * that have uncleared errors pending.
	 */
	if (nfsd_file_check_writeback(nf))
		goto out_skip;

	if (test_and_clear_bit(NFSD_FILE_REFERENCED, &nf->nf_flags))
		goto out_rescan;

	if (!test_and_clear_bit(NFSD_FILE_HASHED, &nf->nf_flags))
		goto out_skip;

	list_lru_isolate_move(lru, &nf->nf_lru, head);
	return LRU_REMOVED;
out_rescan:
	set_bit(NFSD_FILE_LRU_RESCAN, &nfsd_file_lru_flags);
out_skip:
	return LRU_SKIP;
}

static void
nfsd_file_lru_dispose(struct list_head *head)
{
	while (!list_empty(head)) {
		struct nfsd_file *nf = list_first_entry(head,
				struct nfsd_file, nf_lru);
		list_del_init(&nf->nf_lru);
		spin_lock(&nfsd_file_hashtbl[nf->nf_hashval].nfb_lock);
		nfsd_file_do_unhash(nf);
		spin_unlock(&nfsd_file_hashtbl[nf->nf_hashval].nfb_lock);
		nfsd_file_put_noref(nf);
	}
}

static unsigned long
nfsd_file_lru_count(struct shrinker *s, struct shrink_control *sc)
{
	return list_lru_count(&nfsd_file_lru);
}

static unsigned long
nfsd_file_lru_scan(struct shrinker *s, struct shrink_control *sc)
{
	LIST_HEAD(head);
	unsigned long ret;

	ret = list_lru_shrink_walk(&nfsd_file_lru, sc, nfsd_file_lru_cb, &head);
	nfsd_file_lru_dispose(&head);
	return ret;
}

static struct shrinker nfsd_file_shrinker = {
	.scan_objects = nfsd_file_lru_scan,
	.count_objects = nfsd_file_lru_count,
	.seeks = 1,
};

static void
__nfsd_file_close_inode(struct inode *inode, unsigned int hashval,
			struct list_head *dispose)
{
	struct nfsd_file *nf;
	struct hlist_node *tmp;

	spin_lock(&nfsd_file_hashtbl[hashval].nfb_lock);
	hlist_for_each_entry_safe(nf, tmp, &nfsd_file_hashtbl[hashval].nfb_head, nf_node) {
		if (inode == nf->nf_inode)
			nfsd_file_unhash_and_release_locked(nf, dispose);
	}
	spin_unlock(&nfsd_file_hashtbl[hashval].nfb_lock);
}

/**
 * nfsd_file_close_inode_sync - attempt to forcibly close a nfsd_file
 * @inode: inode of the file to attempt to remove
 *
 * Walk the whole hash bucket, looking for any files that correspond to "inode".
 * If any do, then unhash them and put the hashtable reference to them and
 * destroy any that had their last reference put. Also ensure that any of the
 * fputs also have their final __fput done as well.
 */
void
nfsd_file_close_inode_sync(struct inode *inode)
{
	unsigned int hashval = (unsigned int)hash_long(inode->i_ino,
						NFSD_FILE_HASH_BITS);
	LIST_HEAD(dispose);

	__nfsd_file_close_inode(inode, hashval, &dispose);
	trace_nfsd_file_close_inode_sync(inode, hashval, !list_empty(&dispose));
	nfsd_file_dispose_list_sync(&dispose);
}

/**
 * nfsd_file_close_inode - attempt to forcibly close a nfsd_file
 * @inode: inode of the file to attempt to remove
 *
 * Walk the whole hash bucket, looking for any files that correspond to "inode".
 * If any do, then unhash them and put the hashtable reference to them and
 * destroy any that had their last reference put.
 */
static void
nfsd_file_close_inode(struct inode *inode)
{
	unsigned int hashval = (unsigned int)hash_long(inode->i_ino,
						NFSD_FILE_HASH_BITS);
	LIST_HEAD(dispose);

	__nfsd_file_close_inode(inode, hashval, &dispose);
	trace_nfsd_file_close_inode(inode, hashval, !list_empty(&dispose));
	nfsd_file_dispose_list(&dispose);
}

/**
 * nfsd_file_delayed_close - close unused nfsd_files
 * @work: dummy
 *
 * Walk the LRU list and close any entries that have not been used since
 * the last scan.
 *
 * Note this can deadlock with nfsd_file_cache_purge.
 */
static void
nfsd_file_delayed_close(struct work_struct *work)
{
	LIST_HEAD(head);

	list_lru_walk(&nfsd_file_lru, nfsd_file_lru_cb, &head, LONG_MAX);

	if (test_and_clear_bit(NFSD_FILE_LRU_RESCAN, &nfsd_file_lru_flags))
		nfsd_file_schedule_laundrette(NFSD_FILE_LAUNDRETTE_NOFLUSH);

	if (!list_empty(&head)) {
		nfsd_file_lru_dispose(&head);
		flush_delayed_fput();
	}
}

static int
nfsd_file_lease_notifier_call(struct notifier_block *nb, unsigned long arg,
			      void *data)
{
	struct file_lock *fl = data;

	/* Only close files for F_SETLEASE leases */
	if (fl->fl_flags & FL_LEASE)
		nfsd_file_close_inode_sync(file_inode(fl->fl_file));
	return 0;
}

static struct notifier_block nfsd_file_lease_notifier = {
	.notifier_call = nfsd_file_lease_notifier_call,
};

static int
nfsd_file_fsnotify_handle_event(struct fsnotify_group *group,
				struct inode *inode,
				u32 mask, const void *data, int data_type,
				const struct qstr *file_name, u32 cookie,
				struct fsnotify_iter_info *iter_info)
{
	trace_nfsd_file_fsnotify_handle_event(inode, mask);

	/* Should be no marks on non-regular files */
	if (!S_ISREG(inode->i_mode)) {
		WARN_ON_ONCE(1);
		return 0;
	}

	/* don't close files if this was not the last link */
	if (mask & FS_ATTRIB) {
		if (inode->i_nlink)
			return 0;
	}

	nfsd_file_close_inode(inode);
	return 0;
}

static const struct fsnotify_ops nfsd_file_fsnotify_ops = {
	.handle_event = nfsd_file_fsnotify_handle_event,
	.free_mark = nfsd_file_mark_free,
};

int
nfsd_file_cache_init(void)
{
	int ret = -ENOMEM;
	unsigned int i;

	clear_bit(NFSD_FILE_SHUTDOWN, &nfsd_file_lru_flags);

	if (nfsd_file_hashtbl)
		return 0;

	nfsd_file_hashtbl = kcalloc(NFSD_FILE_HASH_SIZE,
				sizeof(*nfsd_file_hashtbl), GFP_KERNEL);
	if (!nfsd_file_hashtbl) {
		pr_err("nfsd: unable to allocate nfsd_file_hashtbl\n");
		goto out_err;
	}

	nfsd_file_slab = kmem_cache_create("nfsd_file",
				sizeof(struct nfsd_file), 0, 0, NULL);
	if (!nfsd_file_slab) {
		pr_err("nfsd: unable to create nfsd_file_slab\n");
		goto out_err;
	}

	nfsd_file_mark_slab = kmem_cache_create("nfsd_file_mark",
				sizeof(struct nfsd_file_mark), 0, 0, NULL);
	if (!nfsd_file_mark_slab) {
		pr_err("nfsd: unable to create nfsd_file_mark_slab\n");
		goto out_err;
	}

	ret = list_lru_init(&nfsd_file_lru);
	if (ret) {
		pr_err("nfsd: failed to init nfsd_file_lru: %d\n", ret);
		goto out_err;
	}

	ret = register_shrinker(&nfsd_file_shrinker);
	if (ret) {
		pr_err("nfsd: failed to register nfsd_file_shrinker: %d\n", ret);
		goto out_lru;
	}

	ret = lease_register_notifier(&nfsd_file_lease_notifier);
	if (ret) {
		pr_err("nfsd: unable to register lease notifier: %d\n", ret);
		goto out_shrinker;
	}

	nfsd_file_fsnotify_group = fsnotify_alloc_group(&nfsd_file_fsnotify_ops);
	if (IS_ERR(nfsd_file_fsnotify_group)) {
		pr_err("nfsd: unable to create fsnotify group: %ld\n",
			PTR_ERR(nfsd_file_fsnotify_group));
		nfsd_file_fsnotify_group = NULL;
		goto out_notifier;
	}

	for (i = 0; i < NFSD_FILE_HASH_SIZE; i++) {
		INIT_HLIST_HEAD(&nfsd_file_hashtbl[i].nfb_head);
		spin_lock_init(&nfsd_file_hashtbl[i].nfb_lock);
	}

	INIT_DELAYED_WORK(&nfsd_filecache_laundrette, nfsd_file_delayed_close);
out:
	return ret;
out_notifier:
	lease_unregister_notifier(&nfsd_file_lease_notifier);
out_shrinker:
	unregister_shrinker(&nfsd_file_shrinker);
out_lru:
	list_lru_destroy(&nfsd_file_lru);
out_err:
	kmem_cache_destroy(nfsd_file_slab);
	nfsd_file_slab = NULL;
	kmem_cache_destroy(nfsd_file_mark_slab);
	nfsd_file_mark_slab = NULL;
	kfree(nfsd_file_hashtbl);
	nfsd_file_hashtbl = NULL;
	goto out;
}

/*
 * Note this can deadlock with nfsd_file_lru_cb.
 */
void
nfsd_file_cache_purge(struct net *net)
{
	unsigned int i;
	struct nfsd_file *nf;
	struct hlist_node *next;
	LIST_HEAD(dispose);
	bool del;

	if (!nfsd_file_hashtbl)
		return;

	for (i = 0; i < NFSD_FILE_HASH_SIZE; i++) {
		struct nfsd_fcache_bucket *nfb = &nfsd_file_hashtbl[i];

		spin_lock(&nfb->nfb_lock);
		hlist_for_each_entry_safe(nf, next, &nfb->nfb_head, nf_node) {
			if (net && nf->nf_net != net)
				continue;
			del = nfsd_file_unhash_and_release_locked(nf, &dispose);

			/*
			 * Deadlock detected! Something marked this entry as
			 * unhashed, but hasn't removed it from the hash list.
			 */
			WARN_ON_ONCE(!del);
		}
		spin_unlock(&nfb->nfb_lock);
		nfsd_file_dispose_list(&dispose);
	}
}

void
nfsd_file_cache_shutdown(void)
{
	set_bit(NFSD_FILE_SHUTDOWN, &nfsd_file_lru_flags);

	lease_unregister_notifier(&nfsd_file_lease_notifier);
	unregister_shrinker(&nfsd_file_shrinker);
	/*
	 * make sure all callers of nfsd_file_lru_cb are done before
	 * calling nfsd_file_cache_purge
	 */
	cancel_delayed_work_sync(&nfsd_filecache_laundrette);
	nfsd_file_cache_purge(NULL);
	list_lru_destroy(&nfsd_file_lru);
	rcu_barrier();
	fsnotify_put_group(nfsd_file_fsnotify_group);
	nfsd_file_fsnotify_group = NULL;
	kmem_cache_destroy(nfsd_file_slab);
	nfsd_file_slab = NULL;
	fsnotify_wait_marks_destroyed();
	kmem_cache_destroy(nfsd_file_mark_slab);
	nfsd_file_mark_slab = NULL;
	kfree(nfsd_file_hashtbl);
	nfsd_file_hashtbl = NULL;
}

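/*
 * Compare the fs credentials of two requests so that a cached open is only
 * shared between requests made with the same fsuid, fsgid and group list.
 */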
static bool
nfsd_match_cred(const struct cred *c1, const struct cred *c2)
{
	int i;

	if (!uid_eq(c1->fsuid, c2->fsuid))
		return false;
	if (!gid_eq(c1->fsgid, c2->fsgid))
		return false;
	if (c1->group_info == NULL || c2->group_info == NULL)
		return c1->group_info == c2->group_info;
	if (c1->group_info->ngroups != c2->group_info->ngroups)
		return false;
	for (i = 0; i < c1->group_info->ngroups; i++) {
		if (!gid_eq(c1->group_info->gid[i], c2->group_info->gid[i]))
			return false;
	}
	return true;
}

static struct nfsd_file *
nfsd_file_find_locked(struct inode *inode, unsigned int may_flags,
		      unsigned int hashval, struct net *net)
{
	struct nfsd_file *nf;
	unsigned char need = may_flags & NFSD_FILE_MAY_MASK;

	hlist_for_each_entry_rcu(nf, &nfsd_file_hashtbl[hashval].nfb_head,
				 nf_node) {
		if ((need & nf->nf_may) != need)
			continue;
		if (nf->nf_inode != inode)
			continue;
		if (nf->nf_net != net)
			continue;
		if (!nfsd_match_cred(nf->nf_cred, current_cred()))
			continue;
		if (nfsd_file_get(nf) != NULL)
			return nf;
	}
	return NULL;
}

/**
 * nfsd_file_is_cached - are there any cached open files for this fh?
 * @inode: inode of the file to check
 *
 * Scan the hashtable for open files that match this fh. Returns true if there
 * are any, and false if not.
 */
bool
nfsd_file_is_cached(struct inode *inode)
{
	bool ret = false;
	struct nfsd_file *nf;
	unsigned int hashval;

	hashval = (unsigned int)hash_long(inode->i_ino, NFSD_FILE_HASH_BITS);

	rcu_read_lock();
	hlist_for_each_entry_rcu(nf, &nfsd_file_hashtbl[hashval].nfb_head,
				 nf_node) {
		if (inode == nf->nf_inode) {
			ret = true;
			break;
		}
	}
	rcu_read_unlock();
	trace_nfsd_file_is_cached(inode, hashval, (int)ret);
	return ret;
}

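/*
 * Find or open a cached file for an nfsd request. Look up an nfsd_file for
 * this inode/may_flags/net/cred under RCU. On a miss, allocate a new entry,
 * insert it with NFSD_FILE_PENDING set (after re-checking for a race under
 * the bucket lock), and only then do the actual open. Other threads that
 * find the entry wait on the PENDING bit until construction finishes; if
 * construction failed, they retry once before returning nfserr_jukebox.
 */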
__be32
nfsd_file_acquire(struct svc_rqst *rqstp, struct svc_fh *fhp,
		  unsigned int may_flags, struct nfsd_file **pnf)
{
	__be32 status;
	struct net *net = SVC_NET(rqstp);
	struct nfsd_file *nf, *new;
	struct inode *inode;
	unsigned int hashval;
	bool retry = true;

	/* FIXME: skip this if fh_dentry is already set? */
	status = fh_verify(rqstp, fhp, S_IFREG,
			may_flags|NFSD_MAY_OWNER_OVERRIDE);
	if (status != nfs_ok)
		return status;

	inode = d_inode(fhp->fh_dentry);
	hashval = (unsigned int)hash_long(inode->i_ino, NFSD_FILE_HASH_BITS);
retry:
	rcu_read_lock();
	nf = nfsd_file_find_locked(inode, may_flags, hashval, net);
	rcu_read_unlock();
	if (nf)
		goto wait_for_construction;

	new = nfsd_file_alloc(inode, may_flags, hashval, net);
	if (!new) {
		trace_nfsd_file_acquire(rqstp, hashval, inode, may_flags,
					NULL, nfserr_jukebox);
		return nfserr_jukebox;
	}

	spin_lock(&nfsd_file_hashtbl[hashval].nfb_lock);
	nf = nfsd_file_find_locked(inode, may_flags, hashval, net);
	if (nf == NULL)
		goto open_file;
	spin_unlock(&nfsd_file_hashtbl[hashval].nfb_lock);
	nfsd_file_slab_free(&new->nf_rcu);

wait_for_construction:
	wait_on_bit(&nf->nf_flags, NFSD_FILE_PENDING, TASK_UNINTERRUPTIBLE);

	/* Did construction of this file fail? */
	if (!test_bit(NFSD_FILE_HASHED, &nf->nf_flags)) {
		if (!retry) {
			status = nfserr_jukebox;
			goto out;
		}
		retry = false;
		nfsd_file_put_noref(nf);
		goto retry;
	}

	this_cpu_inc(nfsd_file_cache_hits);

	if (!(may_flags & NFSD_MAY_NOT_BREAK_LEASE)) {
		bool write = (may_flags & NFSD_MAY_WRITE);

		if (test_bit(NFSD_FILE_BREAK_READ, &nf->nf_flags) ||
		    (test_bit(NFSD_FILE_BREAK_WRITE, &nf->nf_flags) && write)) {
			status = nfserrno(nfsd_open_break_lease(
					file_inode(nf->nf_file), may_flags));
			if (status == nfs_ok) {
				clear_bit(NFSD_FILE_BREAK_READ, &nf->nf_flags);
				if (write)
					clear_bit(NFSD_FILE_BREAK_WRITE,
						  &nf->nf_flags);
			}
		}
	}
out:
	if (status == nfs_ok) {
		*pnf = nf;
	} else {
		nfsd_file_put(nf);
		nf = NULL;
	}

	trace_nfsd_file_acquire(rqstp, hashval, inode, may_flags, nf, status);
	return status;
open_file:
	nf = new;
	/* Take reference for the hashtable */
	atomic_inc(&nf->nf_ref);
	__set_bit(NFSD_FILE_HASHED, &nf->nf_flags);
	__set_bit(NFSD_FILE_PENDING, &nf->nf_flags);
	list_lru_add(&nfsd_file_lru, &nf->nf_lru);
	hlist_add_head_rcu(&nf->nf_node, &nfsd_file_hashtbl[hashval].nfb_head);
	++nfsd_file_hashtbl[hashval].nfb_count;
	nfsd_file_hashtbl[hashval].nfb_maxcount = max(nfsd_file_hashtbl[hashval].nfb_maxcount,
			nfsd_file_hashtbl[hashval].nfb_count);
	spin_unlock(&nfsd_file_hashtbl[hashval].nfb_lock);
	atomic_long_inc(&nfsd_filecache_count);

	nf->nf_mark = nfsd_file_mark_find_or_create(nf);
	if (nf->nf_mark)
		status = nfsd_open_verified(rqstp, fhp, S_IFREG,
				may_flags, &nf->nf_file);
	else
		status = nfserr_jukebox;
	/*
	 * If construction failed, or we raced with a call to unlink()
	 * then unhash.
	 */
	if (status != nfs_ok || inode->i_nlink == 0) {
		bool do_free;

		spin_lock(&nfsd_file_hashtbl[hashval].nfb_lock);
		do_free = nfsd_file_unhash(nf);
		spin_unlock(&nfsd_file_hashtbl[hashval].nfb_lock);
		if (do_free)
			nfsd_file_put_noref(nf);
	}
	clear_bit_unlock(NFSD_FILE_PENDING, &nf->nf_flags);
	smp_mb__after_atomic();
	wake_up_bit(&nf->nf_flags, NFSD_FILE_PENDING);
	goto out;
}

/*
 * Note that fields may be added, removed or reordered in the future. Programs
 * scraping this file for info should test the labels to ensure they're
 * getting the correct field.
 */
static int nfsd_file_cache_stats_show(struct seq_file *m, void *v)
{
	unsigned int i, count = 0, longest = 0;
	unsigned long hits = 0;

	/*
	 * No need for spinlocks here since we're not terribly interested in
	 * accuracy. We do take the nfsd_mutex simply to ensure that we
	 * don't end up racing with server shutdown
	 */
	mutex_lock(&nfsd_mutex);
	if (nfsd_file_hashtbl) {
		for (i = 0; i < NFSD_FILE_HASH_SIZE; i++) {
			count += nfsd_file_hashtbl[i].nfb_count;
			longest = max(longest, nfsd_file_hashtbl[i].nfb_count);
		}
	}
	mutex_unlock(&nfsd_mutex);

	for_each_possible_cpu(i)
		hits += per_cpu(nfsd_file_cache_hits, i);

	seq_printf(m, "total entries: %u\n", count);
	seq_printf(m, "longest chain: %u\n", longest);
	seq_printf(m, "cache hits:    %lu\n", hits);
	return 0;
}

int nfsd_file_cache_stats_open(struct inode *inode, struct file *file)
{
	return single_open(file, nfsd_file_cache_stats_show, NULL);
}