// SPDX-License-Identifier: GPL-2.0-or-later
/* CacheFiles path walking and related routines
 *
 * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/fs.h>
#include <linux/namei.h>
#include "internal.h"

/*
 * Mark the backing file as being a cache file if it's not already in use.  The
 * mark tells the culling request command that it's not allowed to cull the
 * file or directory.  The caller must hold the inode lock.
 */
static bool __cachefiles_mark_inode_in_use(struct cachefiles_object *object,
					   struct dentry *dentry)
{
	struct inode *inode = d_backing_inode(dentry);
	bool can_use = false;

	if (!(inode->i_flags & S_KERNEL_FILE)) {
		inode->i_flags |= S_KERNEL_FILE;
		trace_cachefiles_mark_active(object, inode);
		can_use = true;
	} else {
		trace_cachefiles_mark_failed(object, inode);
		pr_notice("cachefiles: Inode already in use: %pd (B=%lx)\n",
			  dentry, inode->i_ino);
	}

	return can_use;
}

static bool cachefiles_mark_inode_in_use(struct cachefiles_object *object,
					 struct dentry *dentry)
{
	struct inode *inode = d_backing_inode(dentry);
	bool can_use;

	inode_lock(inode);
	can_use = __cachefiles_mark_inode_in_use(object, dentry);
	inode_unlock(inode);
	return can_use;
}

/*
 * Unmark a backing inode.  The caller must hold the inode lock.
 */
static void __cachefiles_unmark_inode_in_use(struct cachefiles_object *object,
					     struct dentry *dentry)
{
	struct inode *inode = d_backing_inode(dentry);

	inode->i_flags &= ~S_KERNEL_FILE;
	trace_cachefiles_mark_inactive(object, inode);
}

/*
 * Unmark a backing inode and tell cachefilesd that there's something that can
 * be culled.
 */
void cachefiles_unmark_inode_in_use(struct cachefiles_object *object,
				    struct file *file)
{
	struct cachefiles_cache *cache = object->volume->cache;
	struct inode *inode = file_inode(file);

	if (inode) {
		inode_lock(inode);
		__cachefiles_unmark_inode_in_use(object, file->f_path.dentry);
		inode_unlock(inode);

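		/* If the file wasn't a detached tmpfile, account the space
		 * that just came free and poke the daemon so that it can see
		 * there's potentially stuff to cull.
		 */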
		if (!test_bit(CACHEFILES_OBJECT_USING_TMPFILE, &object->flags)) {
			atomic_long_add(inode->i_blocks, &cache->b_released);
			if (atomic_inc_return(&cache->f_released))
				cachefiles_state_changed(cache);
		}
	}
}

/*
 * get a subdirectory
 */
struct dentry *cachefiles_get_directory(struct cachefiles_cache *cache,
					struct dentry *dir,
					const char *dirname,
					bool *_is_new)
{
	struct dentry *subdir;
	struct path path;
	int ret;

	_enter(",,%s", dirname);

	/* search the current directory for the element name */
	inode_lock_nested(d_inode(dir), I_MUTEX_PARENT);

retry:
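	/* An artificial error can be injected here to test the error
	 * handling.
	 */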
	ret = cachefiles_inject_read_error();
	if (ret == 0)
		subdir = lookup_one_len(dirname, dir, strlen(dirname));
	else
		subdir = ERR_PTR(ret);
	trace_cachefiles_lookup(NULL, dir, subdir);
	if (IS_ERR(subdir)) {
		trace_cachefiles_vfs_error(NULL, d_backing_inode(dir),
					   PTR_ERR(subdir),
					   cachefiles_trace_lookup_error);
		if (PTR_ERR(subdir) == -ENOMEM)
			goto nomem_d_alloc;
		goto lookup_error;
	}

	_debug("subdir -> %pd %s",
	       subdir, d_backing_inode(subdir) ? "positive" : "negative");

	/* we need to create the subdir if it doesn't exist yet */
	if (d_is_negative(subdir)) {
		ret = cachefiles_has_space(cache, 1, 0,
					   cachefiles_has_space_for_create);
		if (ret < 0)
			goto mkdir_error;

		_debug("attempt mkdir");

		path.mnt = cache->mnt;
		path.dentry = dir;
		ret = security_path_mkdir(&path, subdir, 0700);
		if (ret < 0)
			goto mkdir_error;
		ret = cachefiles_inject_write_error();
		if (ret == 0)
			ret = vfs_mkdir(&init_user_ns, d_inode(dir), subdir, 0700);
		if (ret < 0) {
			trace_cachefiles_vfs_error(NULL, d_inode(dir), ret,
						   cachefiles_trace_mkdir_error);
			goto mkdir_error;
		}
		trace_cachefiles_mkdir(dir, subdir);

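		/* A concurrent rmdir() may have scrapped the directory we
		 * just made, in which case we have to start over.
		 */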
		if (unlikely(d_unhashed(subdir))) {
			cachefiles_put_directory(subdir);
			goto retry;
		}
		ASSERT(d_backing_inode(subdir));

		_debug("mkdir -> %pd{ino=%lu}",
		       subdir, d_backing_inode(subdir)->i_ino);
		if (_is_new)
			*_is_new = true;
	}

	/* Tell rmdir() it's not allowed to delete the subdir */
	inode_lock(d_inode(subdir));
	inode_unlock(d_inode(dir));

	if (!__cachefiles_mark_inode_in_use(NULL, subdir))
		goto mark_error;

	inode_unlock(d_inode(subdir));

	/* we need to make sure the subdir is a directory */
	ASSERT(d_backing_inode(subdir));

	if (!d_can_lookup(subdir)) {
		pr_err("%s is not a directory\n", dirname);
		ret = -EIO;
		goto check_error;
	}

	ret = -EPERM;
	if (!(d_backing_inode(subdir)->i_opflags & IOP_XATTR) ||
	    !d_backing_inode(subdir)->i_op->lookup ||
	    !d_backing_inode(subdir)->i_op->mkdir ||
	    !d_backing_inode(subdir)->i_op->rename ||
	    !d_backing_inode(subdir)->i_op->rmdir ||
	    !d_backing_inode(subdir)->i_op->unlink)
		goto check_error;

	_leave(" = [%lu]", d_backing_inode(subdir)->i_ino);
	return subdir;

check_error:
	cachefiles_put_directory(subdir);
	_leave(" = %d [check]", ret);
	return ERR_PTR(ret);

mark_error:
	inode_unlock(d_inode(subdir));
	dput(subdir);
	return ERR_PTR(-EBUSY);

mkdir_error:
	inode_unlock(d_inode(dir));
	dput(subdir);
	pr_err("mkdir %s failed with error %d\n", dirname, ret);
	return ERR_PTR(ret);

lookup_error:
	inode_unlock(d_inode(dir));
	ret = PTR_ERR(subdir);
	pr_err("Lookup %s failed with error %d\n", dirname, ret);
	return ERR_PTR(ret);

nomem_d_alloc:
	inode_unlock(d_inode(dir));
	_leave(" = -ENOMEM");
	return ERR_PTR(-ENOMEM);
}

/*
 * Put a subdirectory.
 */
void cachefiles_put_directory(struct dentry *dir)
{
	if (dir) {
		inode_lock(dir->d_inode);
		__cachefiles_unmark_inode_in_use(NULL, dir);
		inode_unlock(dir->d_inode);
		dput(dir);
	}
}

/*
 * Remove a regular file from the cache.
 */
static int cachefiles_unlink(struct cachefiles_cache *cache,
			     struct cachefiles_object *object,
			     struct dentry *dir, struct dentry *dentry,
			     enum fscache_why_object_killed why)
{
	struct path path = {
		.mnt	= cache->mnt,
		.dentry	= dir,
	};
	int ret;

	trace_cachefiles_unlink(object, d_inode(dentry)->i_ino, why);
	ret = security_path_unlink(&path, dentry);
	if (ret < 0) {
		cachefiles_io_error(cache, "Unlink security error");
		return ret;
	}

	ret = cachefiles_inject_remove_error();
	if (ret == 0) {
		ret = vfs_unlink(&init_user_ns, d_backing_inode(dir), dentry, NULL);
		if (ret == -EIO)
			cachefiles_io_error(cache, "Unlink failed");
	}
	if (ret != 0)
		trace_cachefiles_vfs_error(object, d_backing_inode(dir), ret,
					   cachefiles_trace_unlink_error);
	return ret;
}

/*
 * Delete an object representation from the cache
 * - File backed objects are unlinked
 * - Directory backed objects are stuffed into the graveyard for userspace to
 *   delete
 */
int cachefiles_bury_object(struct cachefiles_cache *cache,
			   struct cachefiles_object *object,
			   struct dentry *dir,
			   struct dentry *rep,
			   enum fscache_why_object_killed why)
{
	struct dentry *grave, *trap;
	struct path path, path_to_graveyard;
	char nbuffer[8 + 8 + 1];
	int ret;

	_enter(",'%pd','%pd'", dir, rep);

	if (rep->d_parent != dir) {
		inode_unlock(d_inode(dir));
		_leave(" = -ESTALE");
		return -ESTALE;
	}

	/* non-directories can just be unlinked */
	if (!d_is_dir(rep)) {
		dget(rep); /* Stop the dentry being negated if it's only pinned
			    * by a file struct.
			    */
		ret = cachefiles_unlink(cache, object, dir, rep, why);
		dput(rep);

		inode_unlock(d_inode(dir));
		_leave(" = %d", ret);
		return ret;
	}

	/* directories have to be moved to the graveyard */
	_debug("move stale object to graveyard");
	inode_unlock(d_inode(dir));

try_again:
	/* first step is to make up a grave dentry in the graveyard */
	sprintf(nbuffer, "%08x%08x",
		(uint32_t) ktime_get_real_seconds(),
		(uint32_t) atomic_inc_return(&cache->gravecounter));

	/* Do the multiway lock magic: lock both directories and get back the
	 * "trap" dentry that neither end of the rename is allowed to be,
	 * lest a directory loop be created.
	 */
	trap = lock_rename(cache->graveyard, dir);

	/* do some checks before getting the grave dentry */
	if (rep->d_parent != dir || IS_DEADDIR(d_inode(rep))) {
		/* the entry was probably culled when we dropped the parent dir
		 * lock */
		unlock_rename(cache->graveyard, dir);
		_leave(" = 0 [culled?]");
		return 0;
	}

	if (!d_can_lookup(cache->graveyard)) {
		unlock_rename(cache->graveyard, dir);
		cachefiles_io_error(cache, "Graveyard no longer a directory");
		return -EIO;
	}

	if (trap == rep) {
		unlock_rename(cache->graveyard, dir);
		cachefiles_io_error(cache, "May not make directory loop");
		return -EIO;
	}

	if (d_mountpoint(rep)) {
		unlock_rename(cache->graveyard, dir);
		cachefiles_io_error(cache, "Mountpoint in cache");
		return -EIO;
	}

	grave = lookup_one_len(nbuffer, cache->graveyard, strlen(nbuffer));
	if (IS_ERR(grave)) {
		unlock_rename(cache->graveyard, dir);
		trace_cachefiles_vfs_error(object, d_inode(cache->graveyard),
					   PTR_ERR(grave),
					   cachefiles_trace_lookup_error);

		if (PTR_ERR(grave) == -ENOMEM) {
			_leave(" = -ENOMEM");
			return -ENOMEM;
		}

		cachefiles_io_error(cache, "Lookup error %ld", PTR_ERR(grave));
		return -EIO;
	}

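	/* A positive dentry here means the grave name is already taken; back
	 * off and retry with a freshly generated name.
	 */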
	if (d_is_positive(grave)) {
		unlock_rename(cache->graveyard, dir);
		dput(grave);
		grave = NULL;
		cond_resched();
		goto try_again;
	}

	if (d_mountpoint(grave)) {
		unlock_rename(cache->graveyard, dir);
		dput(grave);
		cachefiles_io_error(cache, "Mountpoint in graveyard");
		return -EIO;
	}

	/* target should not be an ancestor of source */
	if (trap == grave) {
		unlock_rename(cache->graveyard, dir);
		dput(grave);
		cachefiles_io_error(cache, "May not make directory loop");
		return -EIO;
	}

	/* attempt the rename */
	path.mnt = cache->mnt;
	path.dentry = dir;
	path_to_graveyard.mnt = cache->mnt;
	path_to_graveyard.dentry = cache->graveyard;
	ret = security_path_rename(&path, rep, &path_to_graveyard, grave, 0);
	if (ret < 0) {
		cachefiles_io_error(cache, "Rename security error %d", ret);
	} else {
		struct renamedata rd = {
			.old_mnt_userns	= &init_user_ns,
			.old_dir	= d_inode(dir),
			.old_dentry	= rep,
			.new_mnt_userns	= &init_user_ns,
			.new_dir	= d_inode(cache->graveyard),
			.new_dentry	= grave,
		};
		trace_cachefiles_rename(object, d_inode(rep)->i_ino, why);
		ret = cachefiles_inject_read_error();
		if (ret == 0)
			ret = vfs_rename(&rd);
		if (ret != 0)
			trace_cachefiles_vfs_error(object, d_inode(dir), ret,
						   cachefiles_trace_rename_error);
		if (ret != 0 && ret != -ENOMEM)
			cachefiles_io_error(cache,
					    "Rename failed with error %d", ret);
	}

	__cachefiles_unmark_inode_in_use(object, rep);
	unlock_rename(cache->graveyard, dir);
	dput(grave);
	_leave(" = 0");
	return 0;
}

/*
 * Delete a cache file.
 */
int cachefiles_delete_object(struct cachefiles_object *object,
			     enum fscache_why_object_killed why)
{
	struct cachefiles_volume *volume = object->volume;
	struct dentry *dentry = object->file->f_path.dentry;
	struct dentry *fan = volume->fanout[(u8)object->cookie->key_hash];
	int ret;

	_enter(",OBJ%x{%pD}", object->debug_id, object->file);

	/* Stop the dentry being negated if it's only pinned by a file struct. */
	dget(dentry);

	inode_lock_nested(d_backing_inode(fan), I_MUTEX_PARENT);
	ret = cachefiles_unlink(volume->cache, object, fan, dentry, why);
	inode_unlock(d_backing_inode(fan));
	dput(dentry);
	return ret;
}

/*
 * Create a temporary file and leave it unattached and un-xattr'd until the
 * time comes to discard the object from memory.
 */
struct file *cachefiles_create_tmpfile(struct cachefiles_object *object)
{
	struct cachefiles_volume *volume = object->volume;
	struct cachefiles_cache *cache = volume->cache;
	const struct cred *saved_cred;
	struct dentry *fan = volume->fanout[(u8)object->cookie->key_hash];
	struct file *file;
	struct path path;
	uint64_t ni_size = object->cookie->object_size;
	long ret;

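	/* The file is opened with O_DIRECT below, so round the initial size
	 * up to the cache's DIO block size.
	 */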
	ni_size = round_up(ni_size, CACHEFILES_DIO_BLOCK_SIZE);

	cachefiles_begin_secure(cache, &saved_cred);

	path.mnt = cache->mnt;
	ret = cachefiles_inject_write_error();
	if (ret == 0)
		path.dentry = vfs_tmpfile(&init_user_ns, fan, S_IFREG, O_RDWR);
	else
		path.dentry = ERR_PTR(ret);
	if (IS_ERR(path.dentry)) {
		trace_cachefiles_vfs_error(object, d_inode(fan), PTR_ERR(path.dentry),
					   cachefiles_trace_tmpfile_error);
		if (PTR_ERR(path.dentry) == -EIO)
			cachefiles_io_error_obj(object, "Failed to create tmpfile");
		file = ERR_CAST(path.dentry);
		goto out;
	}

	trace_cachefiles_tmpfile(object, d_backing_inode(path.dentry));

	if (!cachefiles_mark_inode_in_use(object, path.dentry)) {
		file = ERR_PTR(-EBUSY);
		goto out_dput;
	}

	if (ni_size > 0) {
		trace_cachefiles_trunc(object, d_backing_inode(path.dentry), 0, ni_size,
				       cachefiles_trunc_expand_tmpfile);
		ret = cachefiles_inject_write_error();
		if (ret == 0)
			ret = vfs_truncate(&path, ni_size);
		if (ret < 0) {
			trace_cachefiles_vfs_error(
				object, d_backing_inode(path.dentry), ret,
				cachefiles_trace_trunc_error);
			file = ERR_PTR(ret);
			goto out_dput;
		}
	}

	file = open_with_fake_path(&path, O_RDWR | O_LARGEFILE | O_DIRECT,
				   d_backing_inode(path.dentry), cache->cache_cred);
	if (IS_ERR(file)) {
		trace_cachefiles_vfs_error(object, d_backing_inode(path.dentry),
					   PTR_ERR(file),
					   cachefiles_trace_open_error);
		goto out_dput;
	}
	if (unlikely(!file->f_op->read_iter) ||
	    unlikely(!file->f_op->write_iter)) {
		fput(file);
		pr_notice("Cache does not support read_iter and write_iter\n");
		file = ERR_PTR(-EINVAL);
	}

out_dput:
	dput(path.dentry);
out:
	cachefiles_end_secure(cache, saved_cred);
	return file;
}

/*
 * Create a new file.
 */
static bool cachefiles_create_file(struct cachefiles_object *object)
{
	struct file *file;
	int ret;

	ret = cachefiles_has_space(object->volume->cache, 1, 0,
				   cachefiles_has_space_for_create);
	if (ret < 0)
		return false;

	file = cachefiles_create_tmpfile(object);
	if (IS_ERR(file))
		return false;

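	/* The object is only backed by an unlinked tmpfile at this point;
	 * cachefiles_commit_tmpfile() will link it into place if the object
	 * is committed to the cache.
	 */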
	set_bit(FSCACHE_COOKIE_NEEDS_UPDATE, &object->cookie->flags);
	set_bit(CACHEFILES_OBJECT_USING_TMPFILE, &object->flags);
	_debug("create -> %pD{ino=%lu}", file, file_inode(file)->i_ino);
	object->file = file;
	return true;
}

/*
 * Open an existing file, checking its attributes and replacing it if it is
 * stale.
 */
static bool cachefiles_open_file(struct cachefiles_object *object,
				 struct dentry *dentry)
{
	struct cachefiles_cache *cache = object->volume->cache;
	struct file *file;
	struct path path;
	int ret;

	_enter("%pd", dentry);

	if (!cachefiles_mark_inode_in_use(object, dentry))
		return false;

	/* We need to open a file interface onto a data file now as we can't do
	 * it on demand because writeback called from do_exit() sees
	 * current->fs == NULL - which breaks d_path() called from ext4 open.
	 */
	path.mnt = cache->mnt;
	path.dentry = dentry;
	file = open_with_fake_path(&path, O_RDWR | O_LARGEFILE | O_DIRECT,
				   d_backing_inode(dentry), cache->cache_cred);
	if (IS_ERR(file)) {
		trace_cachefiles_vfs_error(object, d_backing_inode(dentry),
					   PTR_ERR(file),
					   cachefiles_trace_open_error);
		goto error;
	}

	if (unlikely(!file->f_op->read_iter) ||
	    unlikely(!file->f_op->write_iter)) {
		pr_notice("Cache does not support read_iter and write_iter\n");
		goto error_fput;
	}
	_debug("file -> %pd positive", dentry);

	ret = cachefiles_check_auxdata(object, file);
	if (ret < 0)
		goto check_failed;

	object->file = file;

	/* Always update the atime on an object we've just looked up (this is
	 * used to keep track of culling, and atimes are only updated by read,
	 * write and readdir but not lookup or open).
	 */
	touch_atime(&file->f_path);
	dput(dentry);
	return true;

check_failed:
	fscache_cookie_lookup_negative(object->cookie);
	cachefiles_unmark_inode_in_use(object, file);
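	/* If the coherency data is stale, discard the file and start over
	 * with a fresh tmpfile.
	 */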
	if (ret == -ESTALE) {
		fput(file);
		dput(dentry);
		return cachefiles_create_file(object);
	}
error_fput:
	fput(file);
error:
	dput(dentry);
	return false;
}

/*
 * walk from the parent object to the child object through the backing
 * filesystem, creating directories as we go
 */
bool cachefiles_look_up_object(struct cachefiles_object *object)
{
	struct cachefiles_volume *volume = object->volume;
	struct dentry *dentry, *fan = volume->fanout[(u8)object->cookie->key_hash];
	int ret;

	_enter("OBJ%x,%s,", object->debug_id, object->d_name);

	/* Look up path "cache/vol/fanout/file". */
	ret = cachefiles_inject_read_error();
	if (ret == 0)
		dentry = lookup_positive_unlocked(object->d_name, fan,
						  object->d_name_len);
	else
		dentry = ERR_PTR(ret);
	trace_cachefiles_lookup(object, fan, dentry);
	if (IS_ERR(dentry)) {
		if (dentry == ERR_PTR(-ENOENT))
			goto new_file;
		if (dentry == ERR_PTR(-EIO))
			cachefiles_io_error_obj(object, "Lookup failed");
		return false;
	}

	if (!d_is_reg(dentry)) {
		pr_err("%pd is not a file\n", dentry);
		inode_lock_nested(d_inode(fan), I_MUTEX_PARENT);
		ret = cachefiles_bury_object(volume->cache, object, fan, dentry,
					     FSCACHE_OBJECT_IS_WEIRD);
		dput(dentry);
		if (ret < 0)
			return false;
		goto new_file;
	}

	if (!cachefiles_open_file(object, dentry))
		return false;

	_leave(" = t [%lu]", file_inode(object->file)->i_ino);
	return true;

new_file:
	fscache_cookie_lookup_negative(object->cookie);
	return cachefiles_create_file(object);
}

/*
 * Attempt to link a temporary file into its rightful place in the cache.
 */
bool cachefiles_commit_tmpfile(struct cachefiles_cache *cache,
			       struct cachefiles_object *object)
{
	struct cachefiles_volume *volume = object->volume;
	struct dentry *dentry, *fan = volume->fanout[(u8)object->cookie->key_hash];
	bool success = false;
	int ret;

	_enter(",%pD", object->file);

	inode_lock_nested(d_inode(fan), I_MUTEX_PARENT);
	ret = cachefiles_inject_read_error();
	if (ret == 0)
		dentry = lookup_one_len(object->d_name, fan, object->d_name_len);
	else
		dentry = ERR_PTR(ret);
	if (IS_ERR(dentry)) {
		trace_cachefiles_vfs_error(object, d_inode(fan), PTR_ERR(dentry),
					   cachefiles_trace_lookup_error);
		_debug("lookup fail %ld", PTR_ERR(dentry));
		goto out_unlock;
	}

	if (!d_is_negative(dentry)) {
		if (d_backing_inode(dentry) == file_inode(object->file)) {
			success = true;
			goto out_dput;
		}

		ret = cachefiles_unlink(volume->cache, object, fan, dentry,
					FSCACHE_OBJECT_IS_STALE);
		if (ret < 0)
			goto out_dput;

		dput(dentry);
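		/* The dentry we just unlinked is of no further use; repeat
		 * the lookup to get a negative dentry that the tmpfile can be
		 * linked to.
		 */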
		ret = cachefiles_inject_read_error();
		if (ret == 0)
			dentry = lookup_one_len(object->d_name, fan, object->d_name_len);
		else
			dentry = ERR_PTR(ret);
		if (IS_ERR(dentry)) {
			trace_cachefiles_vfs_error(object, d_inode(fan), PTR_ERR(dentry),
						   cachefiles_trace_lookup_error);
			_debug("lookup fail %ld", PTR_ERR(dentry));
			goto out_unlock;
		}
	}

	ret = cachefiles_inject_read_error();
	if (ret == 0)
		ret = vfs_link(object->file->f_path.dentry, &init_user_ns,
			       d_inode(fan), dentry, NULL);
	if (ret < 0) {
		trace_cachefiles_vfs_error(object, d_inode(fan), ret,
					   cachefiles_trace_link_error);
		_debug("link fail %d", ret);
	} else {
		trace_cachefiles_link(object, file_inode(object->file));
		spin_lock(&object->lock);
		/* TODO: Do we want to switch the file pointer to the new dentry? */
		clear_bit(CACHEFILES_OBJECT_USING_TMPFILE, &object->flags);
		spin_unlock(&object->lock);
		success = true;
	}

out_dput:
	dput(dentry);
out_unlock:
	inode_unlock(d_inode(fan));
	_leave(" = %u", success);
	return success;
}

/*
 * Look up an inode to be checked or culled.  Return -EBUSY if the inode is
 * marked in use.  On success, the victim dentry is returned with the parent
 * directory still locked; the caller must unlock it and dput the victim.
 */
static struct dentry *cachefiles_lookup_for_cull(struct cachefiles_cache *cache,
						 struct dentry *dir,
						 char *filename)
{
	struct dentry *victim;
	int ret = -ENOENT;

	inode_lock_nested(d_inode(dir), I_MUTEX_PARENT);

	victim = lookup_one_len(filename, dir, strlen(filename));
	if (IS_ERR(victim))
		goto lookup_error;
	if (d_is_negative(victim))
		goto lookup_put;
	if (d_inode(victim)->i_flags & S_KERNEL_FILE)
		goto lookup_busy;
	return victim;

lookup_busy:
	ret = -EBUSY;
lookup_put:
	inode_unlock(d_inode(dir));
	dput(victim);
	return ERR_PTR(ret);

lookup_error:
	inode_unlock(d_inode(dir));
	ret = PTR_ERR(victim);
	if (ret == -ENOENT)
		return ERR_PTR(-ESTALE); /* Probably got retired by the netfs */

	if (ret == -EIO) {
		cachefiles_io_error(cache, "Lookup failed");
	} else if (ret != -ENOMEM) {
		pr_err("Internal error: %d\n", ret);
		ret = -EIO;
	}

	return ERR_PTR(ret);
}

/*
 * Cull an object if it's not in use
 * - called only by cache manager daemon
 */
int cachefiles_cull(struct cachefiles_cache *cache, struct dentry *dir,
		    char *filename)
{
	struct dentry *victim;
	struct inode *inode;
	int ret;

	_enter(",%pd/,%s", dir, filename);

	victim = cachefiles_lookup_for_cull(cache, dir, filename);
	if (IS_ERR(victim))
		return PTR_ERR(victim);

	/* check to see if someone is using this object */
	inode = d_inode(victim);
	inode_lock(inode);
	if (inode->i_flags & S_KERNEL_FILE) {
		ret = -EBUSY;
	} else {
		/* Stop the cache from picking it back up */
		inode->i_flags |= S_KERNEL_FILE;
		ret = 0;
	}
	inode_unlock(inode);
	if (ret < 0)
		goto error_unlock;

	ret = cachefiles_bury_object(cache, NULL, dir, victim,
				     FSCACHE_OBJECT_WAS_CULLED);
	if (ret < 0)
		goto error;

	fscache_count_culled();
	dput(victim);
	_leave(" = 0");
	return 0;

error_unlock:
	inode_unlock(d_inode(dir));
error:
	dput(victim);
	if (ret == -ENOENT)
		return -ESTALE; /* Probably got retired by the netfs */

	if (ret != -ENOMEM) {
		pr_err("Internal error: %d\n", ret);
		ret = -EIO;
	}

	_leave(" = %d", ret);
	return ret;
}

/*
 * Find out if an object is in use or not
 * - called only by cache manager daemon
 * - returns -EBUSY or 0 to indicate whether an object is in use or not
 */
int cachefiles_check_in_use(struct cachefiles_cache *cache, struct dentry *dir,
			    char *filename)
{
	struct dentry *victim;
	int ret = 0;

	victim = cachefiles_lookup_for_cull(cache, dir, filename);
	if (IS_ERR(victim))
		return PTR_ERR(victim);

	inode_unlock(d_inode(dir));
	dput(victim);
	return ret;
}