blob: a60df88efc4049f9f24ca90e96fa4e951f7f2eff [file] [log] [blame]
Thomas Gleixnerb4d0d232019-05-20 19:08:01 +02001// SPDX-License-Identifier: GPL-2.0-or-later
David Howells14727282009-04-03 16:42:42 +01002/* NFS filesystem cache interface
3 *
4 * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved.
5 * Written by David Howells (dhowells@redhat.com)
David Howells14727282009-04-03 16:42:42 +01006 */
7
8#include <linux/init.h>
9#include <linux/kernel.h>
10#include <linux/sched.h>
11#include <linux/mm.h>
12#include <linux/nfs_fs.h>
13#include <linux/nfs_fs_sb.h>
14#include <linux/in6.h>
15#include <linux/seq_file.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090016#include <linux/slab.h>
David Howells402cb8d2018-04-04 13:41:28 +010017#include <linux/iversion.h>
David Howells14727282009-04-03 16:42:42 +010018
19#include "internal.h"
David Howells545db452009-04-03 16:42:44 +010020#include "iostat.h"
David Howells14727282009-04-03 16:42:42 +010021#include "fscache.h"
22
#define NFSDBG_FACILITY		NFSDBG_FSCACHE

/*
 * Tree of superblock cache keys, ordered by nfs_client pointer and then by
 * the key contents (see nfs_fscache_get_super_cookie()).  It exists to detect
 * — and refuse — attempts to cache two superblocks under the same key.
 * All access is serialised by nfs_fscache_keys_lock.
 */
static struct rb_root nfs_fscache_keys = RB_ROOT;
static DEFINE_SPINLOCK(nfs_fscache_keys_lock);
27
/*
 * Layout of the key for an NFS server cache object.
 *
 * The raw bytes of this structure are used directly as the fscache index
 * key, hence __packed.  The hdr part is always present; exactly one member
 * of the trailing address union follows it, selected by hdr.family, so the
 * effective key length varies with the address family (see
 * nfs_fscache_get_client_cookie()).
 */
struct nfs_server_key {
	struct {
		uint16_t	nfsversion;	/* NFS protocol version */
		uint32_t	minorversion;	/* NFSv4 minor version */
		uint16_t	family;		/* address family */
		__be16		port;		/* IP port */
	} hdr;
	union {
		struct in_addr	ipv4_addr;	/* IPv4 address */
		struct in6_addr	ipv6_addr;	/* IPv6 address */
	};
} __packed;
43
/*
 * Get the per-client index cookie for an NFS client if the appropriate mount
 * flag was set
 * - We always try and get an index cookie for the client, but get filehandle
 *   cookies on a per-superblock basis, depending on the mount flags
 */
void nfs_fscache_get_client_cookie(struct nfs_client *clp)
{
	/* Both pointers alias cl_addr; which one is valid is decided below by
	 * ss_family. */
	const struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) &clp->cl_addr;
	const struct sockaddr_in *sin = (struct sockaddr_in *) &clp->cl_addr;
	struct nfs_server_key key;
	uint16_t len = sizeof(key.hdr);

	/* Zero the whole key so padding and the unused part of the address
	 * union cannot leak stack garbage into the index key bytes. */
	memset(&key, 0, sizeof(key));
	key.hdr.nfsversion = clp->rpc_ops->version;
	key.hdr.minorversion = clp->cl_minorversion;
	key.hdr.family = clp->cl_addr.ss_family;

	switch (clp->cl_addr.ss_family) {
	case AF_INET:
		key.hdr.port = sin->sin_port;
		key.ipv4_addr = sin->sin_addr;
		len += sizeof(key.ipv4_addr);
		break;

	case AF_INET6:
		key.hdr.port = sin6->sin6_port;
		key.ipv6_addr = sin6->sin6_addr;
		len += sizeof(key.ipv6_addr);
		break;

	default:
		/* Unknown family: leave this client uncached. */
		printk(KERN_WARNING "NFS: Unknown network family '%d'\n",
		       clp->cl_addr.ss_family);
		clp->fscache = NULL;
		return;
	}

	/* create a cache index for looking up filehandles */
	clp->fscache = fscache_acquire_cookie(nfs_fscache_netfs.primary_index,
					      &nfs_fscache_server_index_def,
					      &key, len,
					      NULL, 0,
					      clp, 0, true);
	dfprintk(FSCACHE, "NFS: get client cookie (0x%p/0x%p)\n",
		 clp, clp->fscache);
}
91
92/*
93 * Dispose of a per-client cookie
94 */
95void nfs_fscache_release_client_cookie(struct nfs_client *clp)
96{
97 dfprintk(FSCACHE, "NFS: releasing client cookie (0x%p/0x%p)\n",
98 clp, clp->fscache);
99
David Howells402cb8d2018-04-04 13:41:28 +0100100 fscache_relinquish_cookie(clp->fscache, NULL, false);
David Howells14727282009-04-03 16:42:42 +0100101 clp->fscache = NULL;
102}
David Howells08734042009-04-03 16:42:42 +0100103
/*
 * Get the cache cookie for an NFS superblock.  We have to handle
 * uniquification here because the cache doesn't do it for us.
 *
 * The default uniquifier is just an empty string, but it may be overridden
 * either by the 'fsc=xxx' option to mount, or by inheriting it from the parent
 * superblock across an automount point of some nature.
 *
 * On success nfss->fscache_key and nfss->fscache are set; on any failure
 * (allocation failure or a non-unique key) both are left NULL and the mount
 * simply runs uncached.
 */
void nfs_fscache_get_super_cookie(struct super_block *sb, const char *uniq, int ulen)
{
	struct nfs_fscache_key *key, *xkey;
	struct nfs_server *nfss = NFS_SB(sb);
	struct rb_node **p, *parent;
	int diff;

	nfss->fscache_key = NULL;
	nfss->fscache = NULL;
	if (!uniq) {
		/* Default uniquifier: a single NUL byte. */
		uniq = "";
		ulen = 1;
	}

	/* The uniquifier is stored in a variable-length tail after the fixed
	 * part of the key. */
	key = kzalloc(sizeof(*key) + ulen, GFP_KERNEL);
	if (!key)
		return;

	/* Record everything that distinguishes this mount's cache contents:
	 * server identity, relevant superblock flags, mount options, I/O and
	 * attribute-cache parameters, fsid and RPC auth flavour. */
	key->nfs_client = nfss->nfs_client;
	key->key.super.s_flags = sb->s_flags & NFS_SB_MASK;
	key->key.nfs_server.flags = nfss->flags;
	key->key.nfs_server.rsize = nfss->rsize;
	key->key.nfs_server.wsize = nfss->wsize;
	key->key.nfs_server.acregmin = nfss->acregmin;
	key->key.nfs_server.acregmax = nfss->acregmax;
	key->key.nfs_server.acdirmin = nfss->acdirmin;
	key->key.nfs_server.acdirmax = nfss->acdirmax;
	key->key.nfs_server.fsid = nfss->fsid;
	key->key.rpc_auth.au_flavor = nfss->client->cl_auth->au_flavor;

	key->key.uniq_len = ulen;
	memcpy(key->key.uniquifier, uniq, ulen);

	/* Insert into the key tree, ordered by nfs_client pointer, then the
	 * fixed key contents, then the uniquifier.  If an identical key is
	 * already present the new superblock cannot be cached. */
	spin_lock(&nfs_fscache_keys_lock);
	p = &nfs_fscache_keys.rb_node;
	parent = NULL;
	while (*p) {
		parent = *p;
		xkey = rb_entry(parent, struct nfs_fscache_key, node);

		if (key->nfs_client < xkey->nfs_client)
			goto go_left;
		if (key->nfs_client > xkey->nfs_client)
			goto go_right;

		diff = memcmp(&key->key, &xkey->key, sizeof(key->key));
		if (diff < 0)
			goto go_left;
		if (diff > 0)
			goto go_right;

		/* Fixed parts match; fall back to the uniquifier.  A
		 * zero-length uniquifier cannot disambiguate. */
		if (key->key.uniq_len == 0)
			goto non_unique;
		diff = memcmp(key->key.uniquifier,
			      xkey->key.uniquifier,
			      key->key.uniq_len);
		if (diff < 0)
			goto go_left;
		if (diff > 0)
			goto go_right;
		goto non_unique;

	go_left:
		p = &(*p)->rb_left;
		continue;
	go_right:
		p = &(*p)->rb_right;
	}

	rb_link_node(&key->node, parent, p);
	rb_insert_color(&key->node, &nfs_fscache_keys);
	spin_unlock(&nfs_fscache_keys_lock);
	nfss->fscache_key = key;

	/* create a cache index for looking up filehandles */
	nfss->fscache = fscache_acquire_cookie(nfss->nfs_client->fscache,
					       &nfs_fscache_super_index_def,
					       &key->key,
					       sizeof(key->key) + ulen,
					       NULL, 0,
					       nfss, 0, true);
	dfprintk(FSCACHE, "NFS: get superblock cookie (0x%p/0x%p)\n",
		 nfss, nfss->fscache);
	return;

non_unique:
	spin_unlock(&nfs_fscache_keys_lock);
	kfree(key);
	nfss->fscache_key = NULL;
	nfss->fscache = NULL;
	printk(KERN_WARNING "NFS:"
	       " Cache request denied due to non-unique superblock keys\n");
}
205
206/*
207 * release a per-superblock cookie
208 */
209void nfs_fscache_release_super_cookie(struct super_block *sb)
210{
211 struct nfs_server *nfss = NFS_SB(sb);
212
213 dfprintk(FSCACHE, "NFS: releasing superblock cookie (0x%p/0x%p)\n",
214 nfss, nfss->fscache);
215
David Howells402cb8d2018-04-04 13:41:28 +0100216 fscache_relinquish_cookie(nfss->fscache, NULL, false);
David Howells08734042009-04-03 16:42:42 +0100217 nfss->fscache = NULL;
218
219 if (nfss->fscache_key) {
220 spin_lock(&nfs_fscache_keys_lock);
221 rb_erase(&nfss->fscache_key->node, &nfs_fscache_keys);
222 spin_unlock(&nfs_fscache_keys_lock);
223 kfree(nfss->fscache_key);
224 nfss->fscache_key = NULL;
225 }
226}
David Howellsef79c092009-04-03 16:42:43 +0100227
Dave Wysochanski50eaa652020-04-16 06:06:08 -0400228static void nfs_fscache_update_auxdata(struct nfs_fscache_inode_auxdata *auxdata,
229 struct nfs_inode *nfsi)
230{
231 memset(auxdata, 0, sizeof(*auxdata));
232 auxdata->mtime_sec = nfsi->vfs_inode.i_mtime.tv_sec;
233 auxdata->mtime_nsec = nfsi->vfs_inode.i_mtime.tv_nsec;
234 auxdata->ctime_sec = nfsi->vfs_inode.i_ctime.tv_sec;
235 auxdata->ctime_nsec = nfsi->vfs_inode.i_ctime.tv_nsec;
236
237 if (NFS_SERVER(&nfsi->vfs_inode)->nfs_client->rpc_ops->version == 4)
238 auxdata->change_attr = inode_peek_iversion_raw(&nfsi->vfs_inode);
239}
240
/*
 * Initialise the per-inode cache cookie pointer for an NFS inode.
 */
void nfs_fscache_init_inode(struct inode *inode)
{
	struct nfs_fscache_inode_auxdata auxdata;
	struct nfs_server *nfss = NFS_SERVER(inode);
	struct nfs_inode *nfsi = NFS_I(inode);

	nfsi->fscache = NULL;
	/* Only regular files on a superblock that itself has a cache cookie
	 * are cacheable. */
	if (!(nfss->fscache && S_ISREG(inode->i_mode)))
		return;

	/* Snapshot mtime/ctime (and, on v4, the change attribute) as the
	 * coherency data stored alongside the cookie. */
	nfs_fscache_update_auxdata(&auxdata, nfsi);

	/* The NFS file handle is the index key.  The final 'false' acquires
	 * the cookie disabled; it is enabled later at file-open time (see
	 * nfs_fscache_open_file()). */
	nfsi->fscache = fscache_acquire_cookie(NFS_SB(inode->i_sb)->fscache,
					       &nfs_fscache_inode_object_def,
					       nfsi->fh.data, nfsi->fh.size,
					       &auxdata, sizeof(auxdata),
					       nfsi, nfsi->vfs_inode.i_size, false);
}
262
/*
 * Release a per-inode cookie.
 */
void nfs_fscache_clear_inode(struct inode *inode)
{
	struct nfs_fscache_inode_auxdata auxdata;
	struct nfs_inode *nfsi = NFS_I(inode);
	struct fscache_cookie *cookie = nfs_i_fscache(inode);

	dfprintk(FSCACHE, "NFS: clear cookie (0x%p/0x%p)\n", nfsi, cookie);

	/* Hand the latest coherency data back with the cookie so the cache
	 * records an up-to-date view of the inode. */
	nfs_fscache_update_auxdata(&auxdata, nfsi);
	fscache_relinquish_cookie(cookie, &auxdata, false);
	nfsi->fscache = NULL;
}
278
David Howellsf1fe29b2013-09-27 11:20:03 +0100279static bool nfs_fscache_can_enable(void *data)
David Howellsef79c092009-04-03 16:42:43 +0100280{
David Howellsf1fe29b2013-09-27 11:20:03 +0100281 struct inode *inode = data;
David Howellsef79c092009-04-03 16:42:43 +0100282
David Howellsf1fe29b2013-09-27 11:20:03 +0100283 return !inode_is_open_for_write(inode);
David Howellsef79c092009-04-03 16:42:43 +0100284}
285
/*
 * Enable or disable caching for a file that is being opened as appropriate.
 * The cookie is allocated when the inode is initialised, but is not enabled at
 * that time.  Enablement is deferred to file-open time to avoid stat() and
 * access() thrashing the cache.
 *
 * For now, with NFS, only regular files that are open read-only will be able
 * to use the cache.
 *
 * We enable the cache for an inode if we open it read-only and it isn't
 * currently open for writing.  We disable the cache if the inode is open
 * write-only.
 *
 * The caller uses the file struct to pin i_writecount on the inode before
 * calling us when a file is opened for writing, so we can make use of that.
 *
 * Note that this may be invoked multiple times in parallel by parallel
 * nfs_open() functions.
 */
void nfs_fscache_open_file(struct inode *inode, struct file *filp)
{
	struct nfs_fscache_inode_auxdata auxdata;
	struct nfs_inode *nfsi = NFS_I(inode);
	struct fscache_cookie *cookie = nfs_i_fscache(inode);

	/* No cookie was acquired for this inode (e.g. not a regular file or
	 * caching disabled on the superblock): nothing to do. */
	if (!fscache_cookie_valid(cookie))
		return;

	nfs_fscache_update_auxdata(&auxdata, nfsi);

	if (inode_is_open_for_write(inode)) {
		/* Open for write: stop caching and discard any pages already
		 * associated with the cookie. */
		dfprintk(FSCACHE, "NFS: nfsi 0x%p disabling cache\n", nfsi);
		clear_bit(NFS_INO_FSCACHE, &nfsi->flags);
		fscache_disable_cookie(cookie, &auxdata, true);
		fscache_uncache_all_inode_pages(cookie, inode);
	} else {
		/* Read-only open: try to enable; nfs_fscache_can_enable()
		 * re-checks under the cookie's own locking that no writer
		 * appeared in the meantime. */
		dfprintk(FSCACHE, "NFS: nfsi 0x%p enabling cache\n", nfsi);
		fscache_enable_cookie(cookie, &auxdata, nfsi->vfs_inode.i_size,
				      nfs_fscache_can_enable, inode);
		if (fscache_cookie_enabled(cookie))
			set_bit(NFS_INO_FSCACHE, &NFS_I(inode)->flags);
	}
}
EXPORT_SYMBOL_GPL(nfs_fscache_open_file);
David Howells545db452009-04-03 16:42:44 +0100330
331/*
332 * Release the caching state associated with a page, if the page isn't busy
333 * interacting with the cache.
334 * - Returns true (can release page) or false (page busy).
335 */
336int nfs_fscache_release_page(struct page *page, gfp_t gfp)
337{
David Howells545db452009-04-03 16:42:44 +0100338 if (PageFsCache(page)) {
David Howellsf1fe29b2013-09-27 11:20:03 +0100339 struct fscache_cookie *cookie = nfs_i_fscache(page->mapping->host);
Trond Myklebust2c174002010-02-08 09:32:27 -0500340
341 BUG_ON(!cookie);
David Howells545db452009-04-03 16:42:44 +0100342 dfprintk(FSCACHE, "NFS: fscache releasepage (0x%p/0x%p/0x%p)\n",
David Howellsf1fe29b2013-09-27 11:20:03 +0100343 cookie, page, NFS_I(page->mapping->host));
David Howells545db452009-04-03 16:42:44 +0100344
David Howells201a1542009-11-19 18:11:35 +0000345 if (!fscache_maybe_release_page(cookie, page, gfp))
346 return 0;
347
Li RongQinge9f456c2014-11-23 12:47:41 +0800348 nfs_inc_fscache_stats(page->mapping->host,
349 NFSIOS_FSCACHE_PAGES_UNCACHED);
David Howells545db452009-04-03 16:42:44 +0100350 }
351
352 return 1;
353}
354
/*
 * Release the caching state associated with a page if undergoing complete page
 * invalidation.
 */
void __nfs_fscache_invalidate_page(struct page *page, struct inode *inode)
{
	struct fscache_cookie *cookie = nfs_i_fscache(inode);

	BUG_ON(!cookie);

	dfprintk(FSCACHE, "NFS: fscache invalidatepage (0x%p/0x%p/0x%p)\n",
		 cookie, page, NFS_I(inode));

	/* Wait for any in-flight write of this page to the cache to finish
	 * before tearing the page's cache state down. */
	fscache_wait_on_page_write(cookie, page);

	BUG_ON(!PageLocked(page));
	fscache_uncache_page(cookie, page);
	nfs_inc_fscache_stats(page->mapping->host,
			      NFSIOS_FSCACHE_PAGES_UNCACHED);
}
David Howells9a9fc1c2009-04-03 16:42:44 +0100375
/*
 * Handle completion of a page being read from the cache.
 * - Called in process (keventd) context.
 */
static void nfs_readpage_from_fscache_complete(struct page *page,
					       void *context,
					       int error)
{
	dfprintk(FSCACHE,
		 "NFS: readpage_from_fscache_complete (0x%p/0x%p/%d)\n",
		 page, context, error);

	/* if the read completes with an error, we just unlock the page and let
	 * the VM reissue the readpage */
	if (!error) {
		SetPageUptodate(page);
		unlock_page(page);
	} else {
		/* Fall back to reading from the server; only unlock here if
		 * even that could not be started (the async path unlocks the
		 * page itself on completion). */
		error = nfs_readpage_async(context, page->mapping->host, page);
		if (error)
			unlock_page(page);
	}
}
399
/*
 * Retrieve a page from fscache
 *
 * Returns 0 if a read was submitted to the cache (completion is signalled
 * via nfs_readpage_from_fscache_complete()), 1 if the data isn't cached and
 * the caller should read from the server, or a negative error code.
 */
int __nfs_readpage_from_fscache(struct nfs_open_context *ctx,
				struct inode *inode, struct page *page)
{
	int ret;

	dfprintk(FSCACHE,
		 "NFS: readpage_from_fscache(fsc:%p/p:%p(i:%lx f:%lx)/0x%p)\n",
		 nfs_i_fscache(inode), page, page->index, page->flags, inode);

	ret = fscache_read_or_alloc_page(nfs_i_fscache(inode),
					 page,
					 nfs_readpage_from_fscache_complete,
					 ctx,
					 GFP_KERNEL);

	switch (ret) {
	case 0: /* read BIO submitted (page in fscache) */
		dfprintk(FSCACHE,
			 "NFS: readpage_from_fscache: BIO submitted\n");
		nfs_inc_fscache_stats(inode, NFSIOS_FSCACHE_PAGES_READ_OK);
		return ret;

	case -ENOBUFS: /* inode not in cache */
	case -ENODATA: /* page not in cache */
		nfs_inc_fscache_stats(inode, NFSIOS_FSCACHE_PAGES_READ_FAIL);
		dfprintk(FSCACHE,
			 "NFS: readpage_from_fscache %d\n", ret);
		return 1;

	default:
		/* Unexpected error: propagate it to the caller. */
		dfprintk(FSCACHE, "NFS: readpage_from_fscache %d\n", ret);
		nfs_inc_fscache_stats(inode, NFSIOS_FSCACHE_PAGES_READ_FAIL);
	}
	return ret;
}
438
439/*
440 * Retrieve a set of pages from fscache
441 */
442int __nfs_readpages_from_fscache(struct nfs_open_context *ctx,
443 struct inode *inode,
444 struct address_space *mapping,
445 struct list_head *pages,
446 unsigned *nr_pages)
447{
Chuck Lever0f15c532010-05-07 13:33:48 -0400448 unsigned npages = *nr_pages;
449 int ret;
David Howells9a9fc1c2009-04-03 16:42:44 +0100450
451 dfprintk(FSCACHE, "NFS: nfs_getpages_from_fscache (0x%p/%u/0x%p)\n",
David Howellsf1fe29b2013-09-27 11:20:03 +0100452 nfs_i_fscache(inode), npages, inode);
David Howells9a9fc1c2009-04-03 16:42:44 +0100453
David Howellsf1fe29b2013-09-27 11:20:03 +0100454 ret = fscache_read_or_alloc_pages(nfs_i_fscache(inode),
David Howells9a9fc1c2009-04-03 16:42:44 +0100455 mapping, pages, nr_pages,
456 nfs_readpage_from_fscache_complete,
457 ctx,
458 mapping_gfp_mask(mapping));
459 if (*nr_pages < npages)
460 nfs_add_fscache_stats(inode, NFSIOS_FSCACHE_PAGES_READ_OK,
461 npages);
462 if (*nr_pages > 0)
463 nfs_add_fscache_stats(inode, NFSIOS_FSCACHE_PAGES_READ_FAIL,
464 *nr_pages);
465
466 switch (ret) {
467 case 0: /* read submitted to the cache for all pages */
468 BUG_ON(!list_empty(pages));
469 BUG_ON(*nr_pages != 0);
470 dfprintk(FSCACHE,
471 "NFS: nfs_getpages_from_fscache: submitted\n");
472
473 return ret;
474
475 case -ENOBUFS: /* some pages aren't cached and can't be */
476 case -ENODATA: /* some pages aren't cached */
477 dfprintk(FSCACHE,
478 "NFS: nfs_getpages_from_fscache: no page: %d\n", ret);
479 return 1;
480
481 default:
482 dfprintk(FSCACHE,
483 "NFS: nfs_getpages_from_fscache: ret %d\n", ret);
484 }
485
486 return ret;
487}
David Howells7f8e05f2009-04-03 16:42:45 +0100488
/*
 * Store a newly fetched page in fscache
 * - PG_fscache must be set on the page
 *
 * NOTE(review): the 'sync' argument is only logged here, never acted on —
 * confirm with callers whether synchronous writing was ever intended.
 */
void __nfs_readpage_to_fscache(struct inode *inode, struct page *page, int sync)
{
	int ret;

	dfprintk(FSCACHE,
		 "NFS: readpage_to_fscache(fsc:%p/p:%p(i:%lx f:%lx)/%d)\n",
		 nfs_i_fscache(inode), page, page->index, page->flags, sync);

	ret = fscache_write_page(nfs_i_fscache(inode), page,
				 inode->i_size, GFP_KERNEL);
	dfprintk(FSCACHE,
		 "NFS: readpage_to_fscache: p:%p(i:%lu f:%lx) ret %d\n",
		 page, page->index, page->flags, ret);

	if (ret != 0) {
		/* The write could not be queued: drop the page's association
		 * with the cache so no stale reservation is left behind, and
		 * account both the failed write and the uncache. */
		fscache_uncache_page(nfs_i_fscache(inode), page);
		nfs_inc_fscache_stats(inode,
				      NFSIOS_FSCACHE_PAGES_WRITTEN_FAIL);
		nfs_inc_fscache_stats(inode, NFSIOS_FSCACHE_PAGES_UNCACHED);
	} else {
		nfs_inc_fscache_stats(inode,
				      NFSIOS_FSCACHE_PAGES_WRITTEN_OK);
	}
}