blob: 53507aa96b0b63df96dc504abd7dc952c50d00d8 [file] [log] [blame]
// SPDX-License-Identifier: GPL-2.0-or-later
/* NFS filesystem cache interface
 *
 * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */
7
8#include <linux/init.h>
9#include <linux/kernel.h>
10#include <linux/sched.h>
11#include <linux/mm.h>
12#include <linux/nfs_fs.h>
13#include <linux/nfs_fs_sb.h>
14#include <linux/in6.h>
15#include <linux/seq_file.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090016#include <linux/slab.h>
David Howells402cb8d2018-04-04 13:41:28 +010017#include <linux/iversion.h>
David Howells14727282009-04-03 16:42:42 +010018
19#include "internal.h"
David Howells545db452009-04-03 16:42:44 +010020#include "iostat.h"
David Howells14727282009-04-03 16:42:42 +010021#include "fscache.h"
22
23#define NFSDBG_FACILITY NFSDBG_FSCACHE
24
/*
 * Index of the superblock keys currently reserved by mounted NFS
 * superblocks; used to refuse caching when two superblocks would collide
 * on the same key.  Protected by nfs_fscache_keys_lock.
 */
static struct rb_root nfs_fscache_keys = RB_ROOT;
static DEFINE_SPINLOCK(nfs_fscache_keys_lock);
27
/*
 * Layout of the key for an NFS server cache object.
 *
 * This structure is used directly as binary index-key material for the
 * cache, so its layout must stay stable: it is __packed, and the caller
 * zeroes the whole structure before filling it in so that the unused part
 * of the address union never contributes stray bytes to the key.
 */
struct nfs_server_key {
	struct {
		uint16_t	nfsversion;	/* NFS protocol version */
		uint16_t	family;		/* address family */
		__be16		port;		/* IP port */
	} hdr;
	union {
		struct in_addr	ipv4_addr;	/* IPv4 address */
		struct in6_addr	ipv6_addr;	/* IPv6 address */
	};
} __packed;
42
43/*
David Howells14727282009-04-03 16:42:42 +010044 * Get the per-client index cookie for an NFS client if the appropriate mount
45 * flag was set
46 * - We always try and get an index cookie for the client, but get filehandle
47 * cookies on a per-superblock basis, depending on the mount flags
48 */
49void nfs_fscache_get_client_cookie(struct nfs_client *clp)
50{
David Howells402cb8d2018-04-04 13:41:28 +010051 const struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) &clp->cl_addr;
52 const struct sockaddr_in *sin = (struct sockaddr_in *) &clp->cl_addr;
53 struct nfs_server_key key;
54 uint16_t len = sizeof(key.hdr);
55
56 memset(&key, 0, sizeof(key));
57 key.hdr.nfsversion = clp->rpc_ops->version;
58 key.hdr.family = clp->cl_addr.ss_family;
59
60 switch (clp->cl_addr.ss_family) {
61 case AF_INET:
62 key.hdr.port = sin->sin_port;
63 key.ipv4_addr = sin->sin_addr;
64 len += sizeof(key.ipv4_addr);
65 break;
66
67 case AF_INET6:
68 key.hdr.port = sin6->sin6_port;
69 key.ipv6_addr = sin6->sin6_addr;
70 len += sizeof(key.ipv6_addr);
71 break;
72
73 default:
74 printk(KERN_WARNING "NFS: Unknown network family '%d'\n",
75 clp->cl_addr.ss_family);
76 clp->fscache = NULL;
77 return;
78 }
79
David Howells14727282009-04-03 16:42:42 +010080 /* create a cache index for looking up filehandles */
81 clp->fscache = fscache_acquire_cookie(nfs_fscache_netfs.primary_index,
82 &nfs_fscache_server_index_def,
David Howells402cb8d2018-04-04 13:41:28 +010083 &key, len,
84 NULL, 0,
David Howellsee1235a2018-04-04 13:41:28 +010085 clp, 0, true);
David Howells14727282009-04-03 16:42:42 +010086 dfprintk(FSCACHE, "NFS: get client cookie (0x%p/0x%p)\n",
87 clp, clp->fscache);
88}
89
90/*
91 * Dispose of a per-client cookie
92 */
93void nfs_fscache_release_client_cookie(struct nfs_client *clp)
94{
95 dfprintk(FSCACHE, "NFS: releasing client cookie (0x%p/0x%p)\n",
96 clp, clp->fscache);
97
David Howells402cb8d2018-04-04 13:41:28 +010098 fscache_relinquish_cookie(clp->fscache, NULL, false);
David Howells14727282009-04-03 16:42:42 +010099 clp->fscache = NULL;
100}
David Howells08734042009-04-03 16:42:42 +0100101
102/*
103 * Get the cache cookie for an NFS superblock. We have to handle
104 * uniquification here because the cache doesn't do it for us.
David Howells2df54802009-09-23 14:36:39 -0400105 *
106 * The default uniquifier is just an empty string, but it may be overridden
107 * either by the 'fsc=xxx' option to mount, or by inheriting it from the parent
108 * superblock across an automount point of some nature.
David Howells08734042009-04-03 16:42:42 +0100109 */
Bryan Schumaker2311b942012-05-10 15:07:32 -0400110void nfs_fscache_get_super_cookie(struct super_block *sb, const char *uniq, int ulen)
David Howells08734042009-04-03 16:42:42 +0100111{
112 struct nfs_fscache_key *key, *xkey;
113 struct nfs_server *nfss = NFS_SB(sb);
114 struct rb_node **p, *parent;
Bryan Schumaker2311b942012-05-10 15:07:32 -0400115 int diff;
David Howells2df54802009-09-23 14:36:39 -0400116
117 if (!uniq) {
118 uniq = "";
119 ulen = 1;
120 }
121
David Howells08734042009-04-03 16:42:42 +0100122 key = kzalloc(sizeof(*key) + ulen, GFP_KERNEL);
123 if (!key)
124 return;
125
126 key->nfs_client = nfss->nfs_client;
127 key->key.super.s_flags = sb->s_flags & NFS_MS_MASK;
128 key->key.nfs_server.flags = nfss->flags;
129 key->key.nfs_server.rsize = nfss->rsize;
130 key->key.nfs_server.wsize = nfss->wsize;
131 key->key.nfs_server.acregmin = nfss->acregmin;
132 key->key.nfs_server.acregmax = nfss->acregmax;
133 key->key.nfs_server.acdirmin = nfss->acdirmin;
134 key->key.nfs_server.acdirmax = nfss->acdirmax;
135 key->key.nfs_server.fsid = nfss->fsid;
136 key->key.rpc_auth.au_flavor = nfss->client->cl_auth->au_flavor;
137
138 key->key.uniq_len = ulen;
139 memcpy(key->key.uniquifier, uniq, ulen);
140
141 spin_lock(&nfs_fscache_keys_lock);
142 p = &nfs_fscache_keys.rb_node;
143 parent = NULL;
144 while (*p) {
145 parent = *p;
146 xkey = rb_entry(parent, struct nfs_fscache_key, node);
147
148 if (key->nfs_client < xkey->nfs_client)
149 goto go_left;
150 if (key->nfs_client > xkey->nfs_client)
151 goto go_right;
152
153 diff = memcmp(&key->key, &xkey->key, sizeof(key->key));
154 if (diff < 0)
155 goto go_left;
156 if (diff > 0)
157 goto go_right;
158
159 if (key->key.uniq_len == 0)
160 goto non_unique;
161 diff = memcmp(key->key.uniquifier,
162 xkey->key.uniquifier,
163 key->key.uniq_len);
164 if (diff < 0)
165 goto go_left;
166 if (diff > 0)
167 goto go_right;
168 goto non_unique;
169
170 go_left:
171 p = &(*p)->rb_left;
172 continue;
173 go_right:
174 p = &(*p)->rb_right;
175 }
176
177 rb_link_node(&key->node, parent, p);
178 rb_insert_color(&key->node, &nfs_fscache_keys);
179 spin_unlock(&nfs_fscache_keys_lock);
180 nfss->fscache_key = key;
181
182 /* create a cache index for looking up filehandles */
183 nfss->fscache = fscache_acquire_cookie(nfss->nfs_client->fscache,
184 &nfs_fscache_super_index_def,
David Howells402cb8d2018-04-04 13:41:28 +0100185 key, sizeof(*key) + ulen,
186 NULL, 0,
David Howellsee1235a2018-04-04 13:41:28 +0100187 nfss, 0, true);
David Howells08734042009-04-03 16:42:42 +0100188 dfprintk(FSCACHE, "NFS: get superblock cookie (0x%p/0x%p)\n",
189 nfss, nfss->fscache);
190 return;
191
192non_unique:
193 spin_unlock(&nfs_fscache_keys_lock);
194 kfree(key);
195 nfss->fscache_key = NULL;
196 nfss->fscache = NULL;
197 printk(KERN_WARNING "NFS:"
198 " Cache request denied due to non-unique superblock keys\n");
199}
200
201/*
202 * release a per-superblock cookie
203 */
204void nfs_fscache_release_super_cookie(struct super_block *sb)
205{
206 struct nfs_server *nfss = NFS_SB(sb);
207
208 dfprintk(FSCACHE, "NFS: releasing superblock cookie (0x%p/0x%p)\n",
209 nfss, nfss->fscache);
210
David Howells402cb8d2018-04-04 13:41:28 +0100211 fscache_relinquish_cookie(nfss->fscache, NULL, false);
David Howells08734042009-04-03 16:42:42 +0100212 nfss->fscache = NULL;
213
214 if (nfss->fscache_key) {
215 spin_lock(&nfs_fscache_keys_lock);
216 rb_erase(&nfss->fscache_key->node, &nfs_fscache_keys);
217 spin_unlock(&nfs_fscache_keys_lock);
218 kfree(nfss->fscache_key);
219 nfss->fscache_key = NULL;
220 }
221}
David Howellsef79c092009-04-03 16:42:43 +0100222
223/*
224 * Initialise the per-inode cache cookie pointer for an NFS inode.
225 */
David Howellsf1fe29b2013-09-27 11:20:03 +0100226void nfs_fscache_init_inode(struct inode *inode)
David Howellsef79c092009-04-03 16:42:43 +0100227{
David Howells402cb8d2018-04-04 13:41:28 +0100228 struct nfs_fscache_inode_auxdata auxdata;
David Howellsef79c092009-04-03 16:42:43 +0100229 struct nfs_inode *nfsi = NFS_I(inode);
230
David Howellsf1fe29b2013-09-27 11:20:03 +0100231 nfsi->fscache = NULL;
232 if (!S_ISREG(inode->i_mode))
David Howellsef79c092009-04-03 16:42:43 +0100233 return;
David Howells402cb8d2018-04-04 13:41:28 +0100234
235 memset(&auxdata, 0, sizeof(auxdata));
Deepa Dinamani95582b02018-05-08 19:36:02 -0700236 auxdata.mtime = timespec64_to_timespec(nfsi->vfs_inode.i_mtime);
237 auxdata.ctime = timespec64_to_timespec(nfsi->vfs_inode.i_ctime);
David Howells402cb8d2018-04-04 13:41:28 +0100238
239 if (NFS_SERVER(&nfsi->vfs_inode)->nfs_client->rpc_ops->version == 4)
240 auxdata.change_attr = inode_peek_iversion_raw(&nfsi->vfs_inode);
241
David Howellsf1fe29b2013-09-27 11:20:03 +0100242 nfsi->fscache = fscache_acquire_cookie(NFS_SB(inode->i_sb)->fscache,
243 &nfs_fscache_inode_object_def,
David Howells402cb8d2018-04-04 13:41:28 +0100244 nfsi->fh.data, nfsi->fh.size,
245 &auxdata, sizeof(auxdata),
David Howellsee1235a2018-04-04 13:41:28 +0100246 nfsi, nfsi->vfs_inode.i_size, false);
David Howellsef79c092009-04-03 16:42:43 +0100247}
248
249/*
250 * Release a per-inode cookie.
251 */
David Howellsf1fe29b2013-09-27 11:20:03 +0100252void nfs_fscache_clear_inode(struct inode *inode)
David Howellsef79c092009-04-03 16:42:43 +0100253{
David Howells402cb8d2018-04-04 13:41:28 +0100254 struct nfs_fscache_inode_auxdata auxdata;
David Howellsef79c092009-04-03 16:42:43 +0100255 struct nfs_inode *nfsi = NFS_I(inode);
David Howellsf1fe29b2013-09-27 11:20:03 +0100256 struct fscache_cookie *cookie = nfs_i_fscache(inode);
David Howellsef79c092009-04-03 16:42:43 +0100257
David Howellsf1fe29b2013-09-27 11:20:03 +0100258 dfprintk(FSCACHE, "NFS: clear cookie (0x%p/0x%p)\n", nfsi, cookie);
David Howellsef79c092009-04-03 16:42:43 +0100259
David Howells402cb8d2018-04-04 13:41:28 +0100260 memset(&auxdata, 0, sizeof(auxdata));
Deepa Dinamani95582b02018-05-08 19:36:02 -0700261 auxdata.mtime = timespec64_to_timespec(nfsi->vfs_inode.i_mtime);
262 auxdata.ctime = timespec64_to_timespec(nfsi->vfs_inode.i_ctime);
David Howells402cb8d2018-04-04 13:41:28 +0100263 fscache_relinquish_cookie(cookie, &auxdata, false);
David Howellsef79c092009-04-03 16:42:43 +0100264 nfsi->fscache = NULL;
265}
266
David Howellsf1fe29b2013-09-27 11:20:03 +0100267static bool nfs_fscache_can_enable(void *data)
David Howellsef79c092009-04-03 16:42:43 +0100268{
David Howellsf1fe29b2013-09-27 11:20:03 +0100269 struct inode *inode = data;
David Howellsef79c092009-04-03 16:42:43 +0100270
David Howellsf1fe29b2013-09-27 11:20:03 +0100271 return !inode_is_open_for_write(inode);
David Howellsef79c092009-04-03 16:42:43 +0100272}
273
274/*
David Howellsf1fe29b2013-09-27 11:20:03 +0100275 * Enable or disable caching for a file that is being opened as appropriate.
276 * The cookie is allocated when the inode is initialised, but is not enabled at
277 * that time. Enablement is deferred to file-open time to avoid stat() and
278 * access() thrashing the cache.
279 *
280 * For now, with NFS, only regular files that are open read-only will be able
281 * to use the cache.
282 *
283 * We enable the cache for an inode if we open it read-only and it isn't
284 * currently open for writing. We disable the cache if the inode is open
285 * write-only.
286 *
287 * The caller uses the file struct to pin i_writecount on the inode before
288 * calling us when a file is opened for writing, so we can make use of that.
289 *
290 * Note that this may be invoked multiple times in parallel by parallel
291 * nfs_open() functions.
David Howellsef79c092009-04-03 16:42:43 +0100292 */
David Howellsf1fe29b2013-09-27 11:20:03 +0100293void nfs_fscache_open_file(struct inode *inode, struct file *filp)
David Howellsef79c092009-04-03 16:42:43 +0100294{
David Howells402cb8d2018-04-04 13:41:28 +0100295 struct nfs_fscache_inode_auxdata auxdata;
David Howellsf1fe29b2013-09-27 11:20:03 +0100296 struct nfs_inode *nfsi = NFS_I(inode);
297 struct fscache_cookie *cookie = nfs_i_fscache(inode);
David Howellsef79c092009-04-03 16:42:43 +0100298
David Howellsf1fe29b2013-09-27 11:20:03 +0100299 if (!fscache_cookie_valid(cookie))
300 return;
David Howellsef79c092009-04-03 16:42:43 +0100301
David Howells402cb8d2018-04-04 13:41:28 +0100302 memset(&auxdata, 0, sizeof(auxdata));
Deepa Dinamani95582b02018-05-08 19:36:02 -0700303 auxdata.mtime = timespec64_to_timespec(nfsi->vfs_inode.i_mtime);
304 auxdata.ctime = timespec64_to_timespec(nfsi->vfs_inode.i_ctime);
David Howells402cb8d2018-04-04 13:41:28 +0100305
David Howellsf1fe29b2013-09-27 11:20:03 +0100306 if (inode_is_open_for_write(inode)) {
307 dfprintk(FSCACHE, "NFS: nfsi 0x%p disabling cache\n", nfsi);
308 clear_bit(NFS_INO_FSCACHE, &nfsi->flags);
David Howells402cb8d2018-04-04 13:41:28 +0100309 fscache_disable_cookie(cookie, &auxdata, true);
David Howellsf1fe29b2013-09-27 11:20:03 +0100310 fscache_uncache_all_inode_pages(cookie, inode);
311 } else {
312 dfprintk(FSCACHE, "NFS: nfsi 0x%p enabling cache\n", nfsi);
David Howellsee1235a2018-04-04 13:41:28 +0100313 fscache_enable_cookie(cookie, &auxdata, nfsi->vfs_inode.i_size,
David Howells402cb8d2018-04-04 13:41:28 +0100314 nfs_fscache_can_enable, inode);
David Howellsf1fe29b2013-09-27 11:20:03 +0100315 if (fscache_cookie_enabled(cookie))
316 set_bit(NFS_INO_FSCACHE, &NFS_I(inode)->flags);
David Howellsef79c092009-04-03 16:42:43 +0100317 }
318}
David Howellsf1fe29b2013-09-27 11:20:03 +0100319EXPORT_SYMBOL_GPL(nfs_fscache_open_file);
David Howells545db452009-04-03 16:42:44 +0100320
321/*
322 * Release the caching state associated with a page, if the page isn't busy
323 * interacting with the cache.
324 * - Returns true (can release page) or false (page busy).
325 */
326int nfs_fscache_release_page(struct page *page, gfp_t gfp)
327{
David Howells545db452009-04-03 16:42:44 +0100328 if (PageFsCache(page)) {
David Howellsf1fe29b2013-09-27 11:20:03 +0100329 struct fscache_cookie *cookie = nfs_i_fscache(page->mapping->host);
Trond Myklebust2c174002010-02-08 09:32:27 -0500330
331 BUG_ON(!cookie);
David Howells545db452009-04-03 16:42:44 +0100332 dfprintk(FSCACHE, "NFS: fscache releasepage (0x%p/0x%p/0x%p)\n",
David Howellsf1fe29b2013-09-27 11:20:03 +0100333 cookie, page, NFS_I(page->mapping->host));
David Howells545db452009-04-03 16:42:44 +0100334
David Howells201a1542009-11-19 18:11:35 +0000335 if (!fscache_maybe_release_page(cookie, page, gfp))
336 return 0;
337
Li RongQinge9f456c2014-11-23 12:47:41 +0800338 nfs_inc_fscache_stats(page->mapping->host,
339 NFSIOS_FSCACHE_PAGES_UNCACHED);
David Howells545db452009-04-03 16:42:44 +0100340 }
341
342 return 1;
343}
344
/*
 * Release the caching state associated with a page if undergoing complete page
 * invalidation.
 */
void __nfs_fscache_invalidate_page(struct page *page, struct inode *inode)
{
	struct fscache_cookie *cookie = nfs_i_fscache(inode);

	/* The caller must guarantee a cookie exists for this inode. */
	BUG_ON(!cookie);

	dfprintk(FSCACHE, "NFS: fscache invalidatepage (0x%p/0x%p/0x%p)\n",
		 cookie, page, NFS_I(inode));

	/* Let any in-flight write of this page to the cache finish before
	 * the page's cache state is torn down. */
	fscache_wait_on_page_write(cookie, page);

	/* The page must be held locked across the uncache. */
	BUG_ON(!PageLocked(page));
	fscache_uncache_page(cookie, page);
	nfs_inc_fscache_stats(page->mapping->host,
			      NFSIOS_FSCACHE_PAGES_UNCACHED);
}
David Howells9a9fc1c2009-04-03 16:42:44 +0100365
366/*
367 * Handle completion of a page being read from the cache.
368 * - Called in process (keventd) context.
369 */
370static void nfs_readpage_from_fscache_complete(struct page *page,
371 void *context,
372 int error)
373{
374 dfprintk(FSCACHE,
375 "NFS: readpage_from_fscache_complete (0x%p/0x%p/%d)\n",
376 page, context, error);
377
378 /* if the read completes with an error, we just unlock the page and let
379 * the VM reissue the readpage */
380 if (!error) {
381 SetPageUptodate(page);
382 unlock_page(page);
383 } else {
384 error = nfs_readpage_async(context, page->mapping->host, page);
385 if (error)
386 unlock_page(page);
387 }
388}
389
390/*
391 * Retrieve a page from fscache
392 */
393int __nfs_readpage_from_fscache(struct nfs_open_context *ctx,
394 struct inode *inode, struct page *page)
395{
396 int ret;
397
398 dfprintk(FSCACHE,
399 "NFS: readpage_from_fscache(fsc:%p/p:%p(i:%lx f:%lx)/0x%p)\n",
David Howellsf1fe29b2013-09-27 11:20:03 +0100400 nfs_i_fscache(inode), page, page->index, page->flags, inode);
David Howells9a9fc1c2009-04-03 16:42:44 +0100401
David Howellsf1fe29b2013-09-27 11:20:03 +0100402 ret = fscache_read_or_alloc_page(nfs_i_fscache(inode),
David Howells9a9fc1c2009-04-03 16:42:44 +0100403 page,
404 nfs_readpage_from_fscache_complete,
405 ctx,
406 GFP_KERNEL);
407
408 switch (ret) {
409 case 0: /* read BIO submitted (page in fscache) */
410 dfprintk(FSCACHE,
411 "NFS: readpage_from_fscache: BIO submitted\n");
Li RongQinge9f456c2014-11-23 12:47:41 +0800412 nfs_inc_fscache_stats(inode, NFSIOS_FSCACHE_PAGES_READ_OK);
David Howells9a9fc1c2009-04-03 16:42:44 +0100413 return ret;
414
415 case -ENOBUFS: /* inode not in cache */
416 case -ENODATA: /* page not in cache */
Li RongQinge9f456c2014-11-23 12:47:41 +0800417 nfs_inc_fscache_stats(inode, NFSIOS_FSCACHE_PAGES_READ_FAIL);
David Howells9a9fc1c2009-04-03 16:42:44 +0100418 dfprintk(FSCACHE,
419 "NFS: readpage_from_fscache %d\n", ret);
420 return 1;
421
422 default:
423 dfprintk(FSCACHE, "NFS: readpage_from_fscache %d\n", ret);
Li RongQinge9f456c2014-11-23 12:47:41 +0800424 nfs_inc_fscache_stats(inode, NFSIOS_FSCACHE_PAGES_READ_FAIL);
David Howells9a9fc1c2009-04-03 16:42:44 +0100425 }
426 return ret;
427}
428
429/*
430 * Retrieve a set of pages from fscache
431 */
432int __nfs_readpages_from_fscache(struct nfs_open_context *ctx,
433 struct inode *inode,
434 struct address_space *mapping,
435 struct list_head *pages,
436 unsigned *nr_pages)
437{
Chuck Lever0f15c532010-05-07 13:33:48 -0400438 unsigned npages = *nr_pages;
439 int ret;
David Howells9a9fc1c2009-04-03 16:42:44 +0100440
441 dfprintk(FSCACHE, "NFS: nfs_getpages_from_fscache (0x%p/%u/0x%p)\n",
David Howellsf1fe29b2013-09-27 11:20:03 +0100442 nfs_i_fscache(inode), npages, inode);
David Howells9a9fc1c2009-04-03 16:42:44 +0100443
David Howellsf1fe29b2013-09-27 11:20:03 +0100444 ret = fscache_read_or_alloc_pages(nfs_i_fscache(inode),
David Howells9a9fc1c2009-04-03 16:42:44 +0100445 mapping, pages, nr_pages,
446 nfs_readpage_from_fscache_complete,
447 ctx,
448 mapping_gfp_mask(mapping));
449 if (*nr_pages < npages)
450 nfs_add_fscache_stats(inode, NFSIOS_FSCACHE_PAGES_READ_OK,
451 npages);
452 if (*nr_pages > 0)
453 nfs_add_fscache_stats(inode, NFSIOS_FSCACHE_PAGES_READ_FAIL,
454 *nr_pages);
455
456 switch (ret) {
457 case 0: /* read submitted to the cache for all pages */
458 BUG_ON(!list_empty(pages));
459 BUG_ON(*nr_pages != 0);
460 dfprintk(FSCACHE,
461 "NFS: nfs_getpages_from_fscache: submitted\n");
462
463 return ret;
464
465 case -ENOBUFS: /* some pages aren't cached and can't be */
466 case -ENODATA: /* some pages aren't cached */
467 dfprintk(FSCACHE,
468 "NFS: nfs_getpages_from_fscache: no page: %d\n", ret);
469 return 1;
470
471 default:
472 dfprintk(FSCACHE,
473 "NFS: nfs_getpages_from_fscache: ret %d\n", ret);
474 }
475
476 return ret;
477}
David Howells7f8e05f2009-04-03 16:42:45 +0100478
479/*
480 * Store a newly fetched page in fscache
481 * - PG_fscache must be set on the page
482 */
483void __nfs_readpage_to_fscache(struct inode *inode, struct page *page, int sync)
484{
485 int ret;
486
487 dfprintk(FSCACHE,
488 "NFS: readpage_to_fscache(fsc:%p/p:%p(i:%lx f:%lx)/%d)\n",
David Howellsf1fe29b2013-09-27 11:20:03 +0100489 nfs_i_fscache(inode), page, page->index, page->flags, sync);
David Howells7f8e05f2009-04-03 16:42:45 +0100490
David Howellsee1235a2018-04-04 13:41:28 +0100491 ret = fscache_write_page(nfs_i_fscache(inode), page,
492 inode->i_size, GFP_KERNEL);
David Howells7f8e05f2009-04-03 16:42:45 +0100493 dfprintk(FSCACHE,
494 "NFS: readpage_to_fscache: p:%p(i:%lu f:%lx) ret %d\n",
495 page, page->index, page->flags, ret);
496
497 if (ret != 0) {
David Howellsf1fe29b2013-09-27 11:20:03 +0100498 fscache_uncache_page(nfs_i_fscache(inode), page);
Li RongQinge9f456c2014-11-23 12:47:41 +0800499 nfs_inc_fscache_stats(inode,
500 NFSIOS_FSCACHE_PAGES_WRITTEN_FAIL);
501 nfs_inc_fscache_stats(inode, NFSIOS_FSCACHE_PAGES_UNCACHED);
David Howells7f8e05f2009-04-03 16:42:45 +0100502 } else {
Li RongQinge9f456c2014-11-23 12:47:41 +0800503 nfs_inc_fscache_stats(inode,
504 NFSIOS_FSCACHE_PAGES_WRITTEN_OK);
David Howells7f8e05f2009-04-03 16:42:45 +0100505 }
506}