// SPDX-License-Identifier: GPL-2.0-only
/*
 * V9FS cache definitions.
 *
 * Copyright (C) 2009 by Abhishek Kulkarni <adkulkar@umail.iu.edu>
 */

#include <linux/jiffies.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <net/9p/9p.h>

#include "v9fs.h"
#include "cache.h"

#define CACHETAG_LEN 11

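/*
 * Top-level FS-Cache netfs definition for 9p; its primary_index is the
 * parent of every per-session cookie acquired below.
 */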
struct fscache_netfs v9fs_cache_netfs = {
	.name		= "9p",
	.version	= 0,
};

/*
 * v9fs_random_cachetag - Generate a random tag to be associated
 *			  with a new cache session.
 *
 * The value of jiffies is used as a fairly random cache tag.
 */

static
int v9fs_random_cachetag(struct v9fs_session_info *v9ses)
{
	v9ses->cachetag = kmalloc(CACHETAG_LEN, GFP_KERNEL);
	if (!v9ses->cachetag)
		return -ENOMEM;

	return scnprintf(v9ses->cachetag, CACHETAG_LEN, "%lu", jiffies);
}

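/*
 * Cookie definition for the per-session index; sessions are keyed by the
 * cache tag (see v9fs_cache_session_get_cookie() below).
 */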
const struct fscache_cookie_def v9fs_cache_session_index_def = {
	.name		= "9P.session",
	.type		= FSCACHE_COOKIE_TYPE_INDEX,
};

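/*
 * v9fs_cache_session_get_cookie - acquire an index cookie for a session,
 * generating a random cache tag first if none was specified.
 */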
void v9fs_cache_session_get_cookie(struct v9fs_session_info *v9ses)
{
	/* If no cache session tag was specified, we generate a random one. */
	if (!v9ses->cachetag) {
		if (v9fs_random_cachetag(v9ses) < 0) {
			v9ses->fscache = NULL;
			kfree(v9ses->cachetag);
			v9ses->cachetag = NULL;
			return;
		}
	}

	v9ses->fscache = fscache_acquire_cookie(v9fs_cache_netfs.primary_index,
						&v9fs_cache_session_index_def,
						v9ses->cachetag,
						strlen(v9ses->cachetag),
						NULL, 0,
						v9ses, 0, true);
	p9_debug(P9_DEBUG_FSC, "session %p get cookie %p\n",
		 v9ses, v9ses->fscache);
}

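/*
 * v9fs_cache_session_put_cookie - relinquish a session's index cookie
 * when the session is torn down.
 */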
void v9fs_cache_session_put_cookie(struct v9fs_session_info *v9ses)
{
	p9_debug(P9_DEBUG_FSC, "session %p put cookie %p\n",
		 v9ses, v9ses->fscache);
	fscache_relinquish_cookie(v9ses->fscache, NULL, false);
	v9ses->fscache = NULL;
}

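/*
 * Consistency check: FS-Cache hands back the auxiliary data stored with the
 * cookie (the inode's qid version).  If it no longer matches the current
 * qid version, the cached object is stale and must be discarded.
 */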
static enum
fscache_checkaux v9fs_cache_inode_check_aux(void *cookie_netfs_data,
					    const void *buffer,
					    uint16_t buflen,
					    loff_t object_size)
{
	const struct v9fs_inode *v9inode = cookie_netfs_data;

	if (buflen != sizeof(v9inode->qid.version))
		return FSCACHE_CHECKAUX_OBSOLETE;

	if (memcmp(buffer, &v9inode->qid.version,
		   sizeof(v9inode->qid.version)))
		return FSCACHE_CHECKAUX_OBSOLETE;

	return FSCACHE_CHECKAUX_OKAY;
}

const struct fscache_cookie_def v9fs_cache_inode_index_def = {
	.name		= "9p.inode",
	.type		= FSCACHE_COOKIE_TYPE_DATAFILE,
	.check_aux	= v9fs_cache_inode_check_aux,
};

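/*
 * v9fs_cache_inode_get_cookie - acquire a data cookie for a regular file,
 * keyed by qid path with the qid version as auxiliary (coherency) data.
 */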
void v9fs_cache_inode_get_cookie(struct inode *inode)
{
	struct v9fs_inode *v9inode;
	struct v9fs_session_info *v9ses;

	if (!S_ISREG(inode->i_mode))
		return;

	v9inode = V9FS_I(inode);
	if (v9inode->fscache)
		return;

	v9ses = v9fs_inode2v9ses(inode);
	v9inode->fscache = fscache_acquire_cookie(v9ses->fscache,
						  &v9fs_cache_inode_index_def,
						  &v9inode->qid.path,
						  sizeof(v9inode->qid.path),
						  &v9inode->qid.version,
						  sizeof(v9inode->qid.version),
						  v9inode,
						  i_size_read(&v9inode->vfs_inode),
						  true);

	p9_debug(P9_DEBUG_FSC, "inode %p get cookie %p\n",
		 inode, v9inode->fscache);
}

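/*
 * v9fs_cache_inode_put_cookie - release the inode's cookie, keeping the
 * cached data (the current qid version is written back as auxiliary data).
 */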
void v9fs_cache_inode_put_cookie(struct inode *inode)
{
	struct v9fs_inode *v9inode = V9FS_I(inode);

	if (!v9inode->fscache)
		return;
	p9_debug(P9_DEBUG_FSC, "inode %p put cookie %p\n",
		 inode, v9inode->fscache);

	fscache_relinquish_cookie(v9inode->fscache, &v9inode->qid.version,
				  false);
	v9inode->fscache = NULL;
}

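/*
 * v9fs_cache_inode_flush_cookie - retire the inode's cookie, discarding
 * any data cached for it.
 */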
void v9fs_cache_inode_flush_cookie(struct inode *inode)
{
	struct v9fs_inode *v9inode = V9FS_I(inode);

	if (!v9inode->fscache)
		return;
	p9_debug(P9_DEBUG_FSC, "inode %p flush cookie %p\n",
		 inode, v9inode->fscache);

	fscache_relinquish_cookie(v9inode->fscache, NULL, true);
	v9inode->fscache = NULL;
}

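/*
 * v9fs_cache_inode_set_cookie - adjust caching when a file is opened:
 * opens that permit writing flush (retire) the cookie, read-only opens
 * (re)acquire one.
 */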
void v9fs_cache_inode_set_cookie(struct inode *inode, struct file *filp)
{
	struct v9fs_inode *v9inode = V9FS_I(inode);

	if (!v9inode->fscache)
		return;

	mutex_lock(&v9inode->fscache_lock);

	if ((filp->f_flags & O_ACCMODE) != O_RDONLY)
		v9fs_cache_inode_flush_cookie(inode);
	else
		v9fs_cache_inode_get_cookie(inode);

	mutex_unlock(&v9inode->fscache_lock);
}

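/*
 * v9fs_cache_inode_reset_cookie - retire the current data cookie and
 * acquire a fresh one for the same inode.
 */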
void v9fs_cache_inode_reset_cookie(struct inode *inode)
{
	struct v9fs_inode *v9inode = V9FS_I(inode);
	struct v9fs_session_info *v9ses;
	struct fscache_cookie *old;

	if (!v9inode->fscache)
		return;

	old = v9inode->fscache;

	mutex_lock(&v9inode->fscache_lock);
	fscache_relinquish_cookie(v9inode->fscache, NULL, true);

	v9ses = v9fs_inode2v9ses(inode);
	v9inode->fscache = fscache_acquire_cookie(v9ses->fscache,
						  &v9fs_cache_inode_index_def,
						  &v9inode->qid.path,
						  sizeof(v9inode->qid.path),
						  &v9inode->qid.version,
						  sizeof(v9inode->qid.version),
						  v9inode,
						  i_size_read(&v9inode->vfs_inode),
						  true);
	p9_debug(P9_DEBUG_FSC, "inode %p revalidating cookie old %p new %p\n",
		 inode, old, v9inode->fscache);

	mutex_unlock(&v9inode->fscache_lock);
}

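/*
 * releasepage() helper: ask FS-Cache whether the page can be released,
 * as it may still be queued for writing to the cache.
 */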
int __v9fs_fscache_release_page(struct page *page, gfp_t gfp)
{
	struct inode *inode = page->mapping->host;
	struct v9fs_inode *v9inode = V9FS_I(inode);

	BUG_ON(!v9inode->fscache);

	return fscache_maybe_release_page(v9inode->fscache, page, gfp);
}

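/*
 * invalidatepage() helper: wait for any pending write of the page to the
 * cache to finish, then uncache it.
 */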
void __v9fs_fscache_invalidate_page(struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct v9fs_inode *v9inode = V9FS_I(inode);

	BUG_ON(!v9inode->fscache);

	if (PageFsCache(page)) {
		fscache_wait_on_page_write(v9inode->fscache, page);
		BUG_ON(!PageLocked(page));
		fscache_uncache_page(v9inode->fscache, page);
	}
}

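/*
 * Completion callback for fscache_read_or_alloc_page(s): mark the page
 * up to date if the read succeeded and unlock it either way.
 */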
static void v9fs_vfs_readpage_complete(struct page *page, void *data,
				       int error)
{
	if (!error)
		SetPageUptodate(page);

	unlock_page(page);
}

/*
 * __v9fs_readpage_from_fscache - read a page from cache
 *
 * Returns 0 if the page is in cache and a BIO is submitted,
 * 1 if the page is not in cache, and -error otherwise.
 */

int __v9fs_readpage_from_fscache(struct inode *inode, struct page *page)
{
	int ret;
	const struct v9fs_inode *v9inode = V9FS_I(inode);

	p9_debug(P9_DEBUG_FSC, "inode %p page %p\n", inode, page);
	if (!v9inode->fscache)
		return -ENOBUFS;

	ret = fscache_read_or_alloc_page(v9inode->fscache,
					 page,
					 v9fs_vfs_readpage_complete,
					 NULL,
					 GFP_KERNEL);
	switch (ret) {
	case -ENOBUFS:
	case -ENODATA:
		p9_debug(P9_DEBUG_FSC, "page/inode not in cache %d\n", ret);
		return 1;
	case 0:
		p9_debug(P9_DEBUG_FSC, "BIO submitted\n");
		return ret;
	default:
		p9_debug(P9_DEBUG_FSC, "ret %d\n", ret);
		return ret;
	}
}

/*
 * __v9fs_readpages_from_fscache - read multiple pages from cache
 *
 * Returns 0 if the pages are in cache and a BIO is submitted,
 * 1 if the pages are not in cache, and -error otherwise.
 */

int __v9fs_readpages_from_fscache(struct inode *inode,
				  struct address_space *mapping,
				  struct list_head *pages,
				  unsigned *nr_pages)
{
	int ret;
	const struct v9fs_inode *v9inode = V9FS_I(inode);

	p9_debug(P9_DEBUG_FSC, "inode %p pages %u\n", inode, *nr_pages);
	if (!v9inode->fscache)
		return -ENOBUFS;

	ret = fscache_read_or_alloc_pages(v9inode->fscache,
					  mapping, pages, nr_pages,
					  v9fs_vfs_readpage_complete,
					  NULL,
					  mapping_gfp_mask(mapping));
	switch (ret) {
	case -ENOBUFS:
	case -ENODATA:
		p9_debug(P9_DEBUG_FSC, "pages/inodes not in cache %d\n", ret);
		return 1;
	case 0:
		BUG_ON(!list_empty(pages));
		BUG_ON(*nr_pages != 0);
		p9_debug(P9_DEBUG_FSC, "BIO submitted\n");
		return ret;
	default:
		p9_debug(P9_DEBUG_FSC, "ret %d\n", ret);
		return ret;
	}
}

/*
 * __v9fs_readpage_to_fscache - write a page to the cache
 */

void __v9fs_readpage_to_fscache(struct inode *inode, struct page *page)
{
	int ret;
	const struct v9fs_inode *v9inode = V9FS_I(inode);

	p9_debug(P9_DEBUG_FSC, "inode %p page %p\n", inode, page);
	ret = fscache_write_page(v9inode->fscache, page,
				 i_size_read(&v9inode->vfs_inode), GFP_KERNEL);
	p9_debug(P9_DEBUG_FSC, "ret = %d\n", ret);
	if (ret != 0)
		v9fs_uncache_page(inode, page);
}

/*
 * wait for a page to complete writing to the cache
 */
void __v9fs_fscache_wait_on_page_write(struct inode *inode, struct page *page)
{
	const struct v9fs_inode *v9inode = V9FS_I(inode);

	p9_debug(P9_DEBUG_FSC, "inode %p page %p\n", inode, page);
	if (PageFsCache(page))
		fscache_wait_on_page_write(v9inode->fscache, page);
}