// SPDX-License-Identifier: GPL-2.0-only
/*
 * This file contains vfs address (mmap) ops for 9P2000.
 *
 * Copyright (C) 2005 by Eric Van Hensbergen <ericvh@gmail.com>
 * Copyright (C) 2002 by Ron Minnich <rminnich@lanl.gov>
 */

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/inet.h>
#include <linux/pagemap.h>
#include <linux/idr.h>
#include <linux/sched.h>
#include <linux/swap.h>
#include <linux/uio.h>
#include <linux/netfs.h>
#include <net/9p/9p.h>
#include <net/9p/client.h>

#include "v9fs.h"
#include "v9fs_vfs.h"
#include "cache.h"
#include "fid.h"

/**
 * v9fs_req_issue_op - Issue a read from 9P
 * @subreq: The read to make
 */
static void v9fs_req_issue_op(struct netfs_read_subrequest *subreq)
{
	struct netfs_read_request *rreq = subreq->rreq;
	struct p9_fid *fid = rreq->netfs_priv;
	struct iov_iter to;
	loff_t pos = subreq->start + subreq->transferred;
	size_t len = subreq->len - subreq->transferred;
	int total, err;

	iov_iter_xarray(&to, READ, &rreq->mapping->i_pages, pos, len);

	total = p9_client_read(fid, pos, &to, &err);

	/*
	 * If we just extended the file size, any portion not in the cache
	 * won't be on the server and is zeroed.
	 */
	__set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);

	netfs_subreq_terminated(subreq, err ?: total, false);
}

/**
 * v9fs_init_rreq - Initialise a read request
 * @rreq: The read request
 * @file: The file being read from
 */
static void v9fs_init_rreq(struct netfs_read_request *rreq, struct file *file)
{
	struct p9_fid *fid = file->private_data;

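	/* Pin the fid for the duration of the request; v9fs_req_cleanup()
	 * drops it again via p9_client_clunk().
	 */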
	refcount_inc(&fid->count);
	rreq->netfs_priv = fid;
}

/**
 * v9fs_req_cleanup - Cleanup request initialized by v9fs_init_rreq
 * @mapping: unused mapping of request to cleanup
 * @priv: private data to cleanup, a fid, guaranteed non-null.
 */
static void v9fs_req_cleanup(struct address_space *mapping, void *priv)
{
	struct p9_fid *fid = priv;

	p9_client_clunk(fid);
}

/**
 * v9fs_is_cache_enabled - Determine if caching is enabled for an inode
 * @inode: The inode to check
 */
static bool v9fs_is_cache_enabled(struct inode *inode)
{
	struct fscache_cookie *cookie = v9fs_inode_cookie(V9FS_I(inode));

	return fscache_cookie_enabled(cookie) && cookie->cache_priv;
}

/**
 * v9fs_begin_cache_operation - Begin a cache operation for a read
 * @rreq: The read request
 */
static int v9fs_begin_cache_operation(struct netfs_read_request *rreq)
{
#ifdef CONFIG_9P_FSCACHE
	struct fscache_cookie *cookie = v9fs_inode_cookie(V9FS_I(rreq->inode));

	return fscache_begin_read_operation(&rreq->cache_resources, cookie);
#else
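	/* No local cache is configured; -ENOBUFS tells netfs to read from the
	 * server only.
	 */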
	return -ENOBUFS;
#endif
}

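/* Hooks through which the netfs helper library drives 9p read requests. */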
static const struct netfs_read_request_ops v9fs_req_ops = {
	.init_rreq		= v9fs_init_rreq,
	.is_cache_enabled	= v9fs_is_cache_enabled,
	.begin_cache_operation	= v9fs_begin_cache_operation,
	.issue_op		= v9fs_req_issue_op,
	.cleanup		= v9fs_req_cleanup,
};

/**
 * v9fs_vfs_readpage - read an entire page in from 9P
 * @file: the file being read
 * @page: the page to read into
 */
static int v9fs_vfs_readpage(struct file *file, struct page *page)
{
	struct folio *folio = page_folio(page);

	return netfs_readpage(file, folio, &v9fs_req_ops, NULL);
}

/**
 * v9fs_vfs_readahead - read a set of pages from 9P
 * @ractl: The readahead parameters
 */
static void v9fs_vfs_readahead(struct readahead_control *ractl)
{
	netfs_readahead(ractl, &v9fs_req_ops, NULL);
}

/**
 * v9fs_release_page - release the private state associated with a page
 * @page: The page to be released
 * @gfp: The caller's allocation restrictions
 *
 * Returns 1 if the page can be released, 0 otherwise.
 */
static int v9fs_release_page(struct page *page, gfp_t gfp)
{
	struct folio *folio = page_folio(page);
	struct inode *inode = folio_inode(folio);

	if (folio_test_private(folio))
		return 0;
#ifdef CONFIG_9P_FSCACHE
	if (folio_test_fscache(folio)) {
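		/* The folio is still being written to the cache and we cannot
		 * wait for that to finish from kswapd or from an allocation
		 * that does not allow fs recursion, so refuse to release it.
		 */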
		if (current_is_kswapd() || !(gfp & __GFP_FS))
			return 0;
		folio_wait_fscache(folio);
	}
#endif
	fscache_note_page_release(v9fs_inode_cookie(V9FS_I(inode)));
	return 1;
}

/**
 * v9fs_invalidate_page - Invalidate a page completely or partially
 * @page: The page to be invalidated
 * @offset: offset of the invalidated region
 * @length: length of the invalidated region
 */
static void v9fs_invalidate_page(struct page *page, unsigned int offset,
				 unsigned int length)
{
	struct folio *folio = page_folio(page);

	folio_wait_fscache(folio);
}

static void v9fs_write_to_cache_done(void *priv, ssize_t transferred_or_error,
				     bool was_async)
{
	struct v9fs_inode *v9inode = priv;
	__le32 version;

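	/* If the write to the cache failed (and the data was not merely
	 * uncacheable, -ENOBUFS), the cached copy may be stale, so invalidate
	 * it, recording the current qid version.
	 */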
	if (IS_ERR_VALUE(transferred_or_error) &&
	    transferred_or_error != -ENOBUFS) {
		version = cpu_to_le32(v9inode->qid.version);
		fscache_invalidate(v9fs_inode_cookie(v9inode), &version,
				   i_size_read(&v9inode->vfs_inode), 0);
	}
}

static int v9fs_vfs_write_folio_locked(struct folio *folio)
{
	struct inode *inode = folio_inode(folio);
	struct v9fs_inode *v9inode = V9FS_I(inode);
	struct fscache_cookie *cookie = v9fs_inode_cookie(v9inode);
	loff_t start = folio_pos(folio);
	loff_t i_size = i_size_read(inode);
	struct iov_iter from;
	size_t len = folio_size(folio);
	int err;

	if (start >= i_size)
		return 0; /* Simultaneous truncation occurred */

	len = min_t(loff_t, i_size - start, len);

	iov_iter_xarray(&from, WRITE, &folio_mapping(folio)->i_pages, start, len);

	/* We should have writeback_fid always set */
	BUG_ON(!v9inode->writeback_fid);

	folio_wait_fscache(folio);
	folio_start_writeback(folio);

	p9_client_write(v9inode->writeback_fid, start, &from, &err);

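	/* If the write to the server succeeded and the inode is being cached,
	 * copy the data into the local cache as well.
	 */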
	if (err == 0 &&
	    fscache_cookie_enabled(cookie) &&
	    test_bit(FSCACHE_COOKIE_IS_CACHING, &cookie->flags)) {
		folio_start_fscache(folio);
		fscache_write_to_cache(v9fs_inode_cookie(v9inode),
				       folio_mapping(folio), start, len, i_size,
				       v9fs_write_to_cache_done, v9inode,
				       true);
	}

	folio_end_writeback(folio);
	return err;
}

static int v9fs_vfs_writepage(struct page *page, struct writeback_control *wbc)
{
	struct folio *folio = page_folio(page);
	int retval;

	p9_debug(P9_DEBUG_VFS, "folio %p\n", folio);

	retval = v9fs_vfs_write_folio_locked(folio);
	if (retval < 0) {
		if (retval == -EAGAIN) {
			folio_redirty_for_writepage(wbc, folio);
			retval = 0;
		} else {
			mapping_set_error(folio_mapping(folio), retval);
		}
	} else
		retval = 0;

	folio_unlock(folio);
	return retval;
}

/**
 * v9fs_launder_page - Writeback a dirty page
 * @page: The page to be cleaned up
 *
 * Returns 0 on success.
 */
static int v9fs_launder_page(struct page *page)
{
	struct folio *folio = page_folio(page);
	int retval;

	if (folio_clear_dirty_for_io(folio)) {
		retval = v9fs_vfs_write_folio_locked(folio);
		if (retval)
			return retval;
	}
	folio_wait_fscache(folio);
	return 0;
}

/**
 * v9fs_direct_IO - 9P address space operation for direct I/O
 * @iocb: target I/O control block
 * @iter: The data/buffer to use
 *
 * The presence of v9fs_direct_IO() in the address space ops vector
 * allows open() O_DIRECT flags which would have failed otherwise.
 *
 * In the non-cached mode, we shunt off direct read and write requests before
 * the VFS gets them, so this method should never be called.
 *
 * Direct IO is not 'yet' supported in the cached mode. Hence when
 * this routine is called through generic_file_aio_read(), the read/write fails
 * with an error.
 */
static ssize_t
v9fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
	struct file *file = iocb->ki_filp;
	loff_t pos = iocb->ki_pos;
	ssize_t n;
	int err = 0;

	if (iov_iter_rw(iter) == WRITE) {
		n = p9_client_write(file->private_data, pos, iter, &err);
		if (n) {
			struct inode *inode = file_inode(file);
			loff_t i_size = i_size_read(inode);

			if (pos + n > i_size)
				inode_add_bytes(inode, pos + n - i_size);
		}
	} else {
		n = p9_client_read(file->private_data, pos, iter, &err);
	}
	return n ? n : err;
}

static int v9fs_write_begin(struct file *filp, struct address_space *mapping,
			    loff_t pos, unsigned int len, unsigned int flags,
			    struct page **subpagep, void **fsdata)
{
	int retval;
	struct folio *folio;
	struct v9fs_inode *v9inode = V9FS_I(mapping->host);

	p9_debug(P9_DEBUG_VFS, "filp %p, mapping %p\n", filp, mapping);

	BUG_ON(!v9inode->writeback_fid);

	/* Prefetch area to be written into the cache if we're caching this
	 * file.  We need to do this before we get a lock on the page in case
	 * there's more than one writer competing for the same cache block.
	 */
	retval = netfs_write_begin(filp, mapping, pos, len, flags, &folio, fsdata,
				   &v9fs_req_ops, NULL);
	if (retval < 0)
		return retval;

	*subpagep = &folio->page;
	return retval;
}

static int v9fs_write_end(struct file *filp, struct address_space *mapping,
			  loff_t pos, unsigned int len, unsigned int copied,
			  struct page *subpage, void *fsdata)
{
	loff_t last_pos = pos + copied;
	struct folio *folio = page_folio(subpage);
	struct inode *inode = mapping->host;
	struct v9fs_inode *v9inode = V9FS_I(inode);

	p9_debug(P9_DEBUG_VFS, "filp %p, mapping %p\n", filp, mapping);

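	/* A short copy into a folio that was not previously up to date cannot
	 * be committed, since the rest of the folio is not backed by valid
	 * data; drop the copy and let the caller retry.
	 */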
	if (!folio_test_uptodate(folio)) {
		if (unlikely(copied < len)) {
			copied = 0;
			goto out;
		}

		folio_mark_uptodate(folio);
	}

	/*
	 * No need to use i_size_read() here, the i_size
	 * cannot change under us because we hold the i_mutex.
	 */
	if (last_pos > inode->i_size) {
		inode_add_bytes(inode, last_pos - inode->i_size);
		i_size_write(inode, last_pos);
		fscache_update_cookie(v9fs_inode_cookie(v9inode), NULL, &last_pos);
	}
	folio_mark_dirty(folio);
out:
	folio_unlock(folio);
	folio_put(folio);

	return copied;
}

#ifdef CONFIG_9P_FSCACHE
/*
 * Mark a page as having been made dirty and thus needing writeback.  We also
 * need to pin the cache object to write back to.
 */
static int v9fs_set_page_dirty(struct page *page)
{
	struct v9fs_inode *v9inode = V9FS_I(page->mapping->host);

	return fscache_set_page_dirty(page, v9fs_inode_cookie(v9inode));
}
#else
#define v9fs_set_page_dirty __set_page_dirty_nobuffers
#endif

const struct address_space_operations v9fs_addr_operations = {
	.readpage = v9fs_vfs_readpage,
	.readahead = v9fs_vfs_readahead,
	.set_page_dirty = v9fs_set_page_dirty,
	.writepage = v9fs_vfs_writepage,
	.write_begin = v9fs_write_begin,
	.write_end = v9fs_write_end,
	.releasepage = v9fs_release_page,
	.invalidatepage = v9fs_invalidate_page,
	.launder_page = v9fs_launder_page,
	.direct_IO = v9fs_direct_IO,
};