// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/fs/nfs/file.c
 *
 *  Copyright (C) 1992  Rick Sladkey
 *
 *  Changes Copyright (C) 1994 by Florian La Roche
 *   - Do not copy data too often around in the kernel.
 *   - In nfs_file_read the return value of kmalloc wasn't checked.
 *   - Put in a better version of read look-ahead buffering. Original idea
 *     and implementation by Wai S Kok elekokws@ee.nus.sg.
 *
 *  Expire cache on write to a file by Wai S Kok (Oct 1994).
 *
 *  Total rewrite of read side for new NFS buffer cache.. Linus.
 *
 *  nfs regular file handling functions
 */

#include <linux/module.h>
#include <linux/time.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_mount.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/gfp.h>
#include <linux/swap.h>

#include <linux/uaccess.h>

#include "delegation.h"
#include "internal.h"
#include "iostat.h"
#include "fscache.h"
#include "pnfs.h"

#include "nfstrace.h"

#define NFSDBG_FACILITY         NFSDBG_FILE

static const struct vm_operations_struct nfs_file_vm_ops;

/* Hack for future NFS swap support */
#ifndef IS_SWAPFILE
# define IS_SWAPFILE(inode)     (0)
#endif

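/*
 * Reject open flag combinations that NFS cannot support:
 * O_APPEND and O_DIRECT may not be used together.
 */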
int nfs_check_flags(int flags)
{
        if ((flags & (O_APPEND | O_DIRECT)) == (O_APPEND | O_DIRECT))
                return -EINVAL;

        return 0;
}
EXPORT_SYMBOL_GPL(nfs_check_flags);

/*
 * Open file
 */
static int
nfs_file_open(struct inode *inode, struct file *filp)
{
        int res;

        dprintk("NFS: open file(%pD2)\n", filp);

        nfs_inc_stats(inode, NFSIOS_VFSOPEN);
        res = nfs_check_flags(filp->f_flags);
        if (res)
                return res;

        res = nfs_open(inode, filp);
        return res;
}

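/*
 * Release the open file: drop the open context and any fscache
 * state associated with this file descriptor.
 */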
int
nfs_file_release(struct inode *inode, struct file *filp)
{
        dprintk("NFS: release(%pD2)\n", filp);

        nfs_inc_stats(inode, NFSIOS_VFSRELEASE);
        nfs_file_clear_open_context(filp);
        nfs_fscache_release_file(inode, filp);
        return 0;
}
EXPORT_SYMBOL_GPL(nfs_file_release);

/**
 * nfs_revalidate_file_size - Revalidate the file size
 * @inode: pointer to inode struct
 * @filp: pointer to struct file
 *
 * Revalidates the file length. This is basically a wrapper around
 * nfs_revalidate_inode() that takes into account the fact that we may
 * have cached writes (in which case we don't care about the server's
 * idea of what the file length is), or O_DIRECT (in which case we
 * shouldn't trust the cache).
 */
static int nfs_revalidate_file_size(struct inode *inode, struct file *filp)
{
        struct nfs_server *server = NFS_SERVER(inode);

        if (filp->f_flags & O_DIRECT)
                goto force_reval;
        if (nfs_check_cache_invalid(inode, NFS_INO_INVALID_SIZE))
                goto force_reval;
        return 0;
force_reval:
        return __nfs_revalidate_inode(server, inode);
}

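/*
 * Seek within the file. SEEK_END, SEEK_DATA and SEEK_HOLE need an
 * up-to-date file size, so revalidate it before handing off to
 * generic_file_llseek().
 */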
loff_t nfs_file_llseek(struct file *filp, loff_t offset, int whence)
{
        dprintk("NFS: llseek file(%pD2, %lld, %d)\n",
                        filp, offset, whence);

        /*
         * whence == SEEK_END || SEEK_DATA || SEEK_HOLE => we must revalidate
         * the cached file length
         */
        if (whence != SEEK_SET && whence != SEEK_CUR) {
                struct inode *inode = filp->f_mapping->host;

                int retval = nfs_revalidate_file_size(inode, filp);
                if (retval < 0)
                        return (loff_t)retval;
        }

        return generic_file_llseek(filp, offset, whence);
}
EXPORT_SYMBOL_GPL(nfs_file_llseek);

/*
 * Flush all dirty pages, and check for write errors.
 */
static int
nfs_file_flush(struct file *file, fl_owner_t id)
{
        struct inode    *inode = file_inode(file);
        errseq_t since;

        dprintk("NFS: flush(%pD2)\n", file);

        nfs_inc_stats(inode, NFSIOS_VFSFLUSH);
        if ((file->f_mode & FMODE_WRITE) == 0)
                return 0;

        /* Flush writes to the server and return any errors */
        since = filemap_sample_wb_err(file->f_mapping);
        nfs_wb_all(inode);
        return filemap_check_wb_err(file->f_mapping, since);
}

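/*
 * Buffered read. O_DIRECT reads bypass the page cache entirely;
 * otherwise revalidate the mapping and use the generic page cache
 * read path, accounting the bytes read in the NFS iostats.
 */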
ssize_t
nfs_file_read(struct kiocb *iocb, struct iov_iter *to)
{
        struct inode *inode = file_inode(iocb->ki_filp);
        ssize_t result;

        if (iocb->ki_flags & IOCB_DIRECT)
                return nfs_file_direct_read(iocb, to);

        dprintk("NFS: read(%pD2, %zu@%lu)\n",
                iocb->ki_filp,
                iov_iter_count(to), (unsigned long) iocb->ki_pos);

        nfs_start_io_read(inode);
        result = nfs_revalidate_mapping(inode, iocb->ki_filp->f_mapping);
        if (!result) {
                result = generic_file_read_iter(iocb, to);
                if (result > 0)
                        nfs_add_stats(inode, NFSIOS_NORMALREADBYTES, result);
        }
        nfs_end_io_read(inode);
        return result;
}
EXPORT_SYMBOL_GPL(nfs_file_read);

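/*
 * Memory-map the file, installing the NFS vm_operations so that page
 * faults and mkwrite notifications are handled by NFS.
 */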
int
nfs_file_mmap(struct file * file, struct vm_area_struct * vma)
{
        struct inode *inode = file_inode(file);
        int     status;

        dprintk("NFS: mmap(%pD2)\n", file);

        /* Note: generic_file_mmap() returns ENOSYS on nommu systems
         *       so we call that before revalidating the mapping
         */
        status = generic_file_mmap(file, vma);
        if (!status) {
                vma->vm_ops = &nfs_file_vm_ops;
                status = nfs_revalidate_mapping(inode, file->f_mapping);
        }
        return status;
}
EXPORT_SYMBOL_GPL(nfs_file_mmap);

/*
 * Flush any dirty pages for this process, and check for write errors.
 * The return status from this call provides a reliable indication of
 * whether any write errors occurred for this process.
 */
static int
nfs_file_fsync_commit(struct file *file, int datasync)
{
        struct inode *inode = file_inode(file);
        int ret;

        dprintk("NFS: fsync file(%pD2) datasync %d\n", file, datasync);

        nfs_inc_stats(inode, NFSIOS_VFSFSYNC);
        ret = nfs_commit_inode(inode, FLUSH_SYNC);
        if (ret < 0)
                return ret;
        return file_check_and_advance_wb_err(file);
}

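/*
 * fsync: write back and commit dirty pages, retrying the whole
 * sequence if the server rebooted and dirty pages must be resent.
 */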
int
nfs_file_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
        struct nfs_open_context *ctx = nfs_file_open_context(file);
        struct inode *inode = file_inode(file);
        int ret;

        trace_nfs_fsync_enter(inode);

        for (;;) {
                ret = file_write_and_wait_range(file, start, end);
                if (ret != 0)
                        break;
                ret = nfs_file_fsync_commit(file, datasync);
                if (ret != 0)
                        break;
                ret = pnfs_sync_inode(inode, !!datasync);
                if (ret != 0)
                        break;
                if (!test_and_clear_bit(NFS_CONTEXT_RESEND_WRITES, &ctx->flags))
                        break;
                /*
                 * If nfs_file_fsync_commit detected a server reboot, then
                 * resend all dirty pages that might have been covered by
                 * the NFS_CONTEXT_RESEND_WRITES flag
                 */
                start = 0;
                end = LLONG_MAX;
        }

        trace_nfs_fsync_exit(inode, ret);
        return ret;
}
EXPORT_SYMBOL_GPL(nfs_file_fsync);

/*
 * Decide whether a read/modify/write cycle may be more efficient
 * than a modify/write/read cycle when writing to a page in the
 * page cache.
 *
 * Some pNFS layout drivers can only read/write at a certain block
 * granularity like all block devices and therefore we must perform
 * read/modify/write whenever a page hasn't been read yet and the data
 * to be written there is not aligned to a block boundary and/or
 * smaller than the block size.
 *
 * The modify/write/read cycle may occur if a page is read before
 * being completely filled by the writer.  In this situation, the
 * page must be completely written to stable storage on the server
 * before it can be refilled by reading in the page from the server.
 * This can lead to expensive, small, FILE_SYNC mode writes being
 * done.
 *
 * It may be more efficient to read the page first if the file is
 * open for reading in addition to writing, the page is not marked
 * as Uptodate, it is not dirty or waiting to be committed,
 * indicating that it was previously allocated and then modified,
 * that there were valid bytes of data in that range of the file,
 * and that the new data won't completely replace the old data in
 * that range of the file.
 */
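/*
 * Return true if the write starts at the beginning of the page and
 * covers all of its valid data, or if the page holds no valid data.
 */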
static bool nfs_full_page_write(struct page *page, loff_t pos, unsigned int len)
{
        unsigned int pglen = nfs_page_length(page);
        unsigned int offset = pos & (PAGE_SIZE - 1);
        unsigned int end = offset + len;

        return !pglen || (end >= pglen && !offset);
}

static bool nfs_want_read_modify_write(struct file *file, struct page *page,
                        loff_t pos, unsigned int len)
{
        /*
         * Up-to-date pages, those with ongoing or full-page write
         * don't need read/modify/write
         */
        if (PageUptodate(page) || PagePrivate(page) ||
            nfs_full_page_write(page, pos, len))
                return false;

        if (pnfs_ld_read_whole_page(file->f_mapping->host))
                return true;
        /* Open for reading too? */
        if (file->f_mode & FMODE_READ)
                return true;
        return false;
}

/*
 * This does the "real" work of the write. We must allocate and lock the
 * page to be sent back to the generic routine, which then copies the
 * data from user space.
 *
 * If the writer ends up delaying the write, the writer needs to
 * increment the page use counts until he is done with the page.
 */
static int nfs_write_begin(struct file *file, struct address_space *mapping,
                        loff_t pos, unsigned len, unsigned flags,
                        struct page **pagep, void **fsdata)
{
        int ret;
        pgoff_t index = pos >> PAGE_SHIFT;
        struct page *page;
        int once_thru = 0;

        dfprintk(PAGECACHE, "NFS: write_begin(%pD2(%lu), %u@%lld)\n",
                file, mapping->host->i_ino, len, (long long) pos);

start:
        page = grab_cache_page_write_begin(mapping, index, flags);
        if (!page)
                return -ENOMEM;
        *pagep = page;

        ret = nfs_flush_incompatible(file, page);
        if (ret) {
                unlock_page(page);
                put_page(page);
        } else if (!once_thru &&
                   nfs_want_read_modify_write(file, page, pos, len)) {
                once_thru = 1;
                ret = nfs_readpage(file, page);
                put_page(page);
                if (!ret)
                        goto start;
        }
        return ret;
}

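/*
 * Complete a buffered write to a page: zero any uninitialised parts,
 * hand the new data to nfs_updatepage(), and flush everything if the
 * open context's credential key is about to expire.
 */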
static int nfs_write_end(struct file *file, struct address_space *mapping,
                        loff_t pos, unsigned len, unsigned copied,
                        struct page *page, void *fsdata)
{
        unsigned offset = pos & (PAGE_SIZE - 1);
        struct nfs_open_context *ctx = nfs_file_open_context(file);
        int status;

        dfprintk(PAGECACHE, "NFS: write_end(%pD2(%lu), %u@%lld)\n",
                file, mapping->host->i_ino, len, (long long) pos);

        /*
         * Zero any uninitialised parts of the page, and then mark the page
         * as up to date if it turns out that we're extending the file.
         */
        if (!PageUptodate(page)) {
                unsigned pglen = nfs_page_length(page);
                unsigned end = offset + copied;

                if (pglen == 0) {
                        zero_user_segments(page, 0, offset,
                                        end, PAGE_SIZE);
                        SetPageUptodate(page);
                } else if (end >= pglen) {
                        zero_user_segment(page, end, PAGE_SIZE);
                        if (offset == 0)
                                SetPageUptodate(page);
                } else
                        zero_user_segment(page, pglen, PAGE_SIZE);
        }

        status = nfs_updatepage(file, page, offset, copied);

        unlock_page(page);
        put_page(page);

        if (status < 0)
                return status;
        NFS_I(mapping->host)->write_io += copied;

        if (nfs_ctx_key_to_expire(ctx, mapping->host)) {
                status = nfs_wb_all(mapping->host);
                if (status < 0)
                        return status;
        }

        return copied;
}

/*
 * Partially or wholly invalidate a page
 * - Release the private state associated with a page if undergoing complete
 *   page invalidation
 * - Called if either PG_private or PG_fscache is set on the page
 * - Caller holds page lock
 */
static void nfs_invalidate_page(struct page *page, unsigned int offset,
                                unsigned int length)
{
        dfprintk(PAGECACHE, "NFS: invalidate_page(%p, %u, %u)\n",
                 page, offset, length);

        if (offset != 0 || length < PAGE_SIZE)
                return;
        /* Cancel any unstarted writes on this page */
        nfs_wb_page_cancel(page_file_mapping(page)->host, page);
        wait_on_page_fscache(page);
}

/*
 * Attempt to release the private state associated with a page
 * - Called if either PG_private or PG_fscache is set on the page
 * - Caller holds page lock
 * - Return true (may release page) or false (may not)
 */
static int nfs_release_page(struct page *page, gfp_t gfp)
{
        dfprintk(PAGECACHE, "NFS: release_page(%p)\n", page);

        /* If PagePrivate() is set, then the page is not freeable */
        if (PagePrivate(page))
                return 0;
        return nfs_fscache_release_page(page, gfp);
}

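/*
 * Tell the VM how to treat pages it cannot immediately free: pages on
 * an inode with an outstanding commit count as under writeback, and
 * pages carrying private state count as dirty.
 */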
static void nfs_check_dirty_writeback(struct page *page,
                                bool *dirty, bool *writeback)
{
        struct nfs_inode *nfsi;
        struct address_space *mapping = page_file_mapping(page);

        if (!mapping || PageSwapCache(page))
                return;

        /*
         * Check if an unstable page is currently being committed and
         * if so, have the VM treat it as if the page is under writeback
         * so it will not block due to pages that will shortly be freeable.
         */
        nfsi = NFS_I(mapping->host);
        if (atomic_read(&nfsi->commit_info.rpcs_out)) {
                *writeback = true;
                return;
        }

        /*
         * If PagePrivate() is set, then the page is not freeable and as the
         * inode is not being committed, it's not going to be cleaned in the
         * near future so treat it as dirty
         */
        if (PagePrivate(page))
                *dirty = true;
}

/*
 * Attempt to clear the private state associated with a page when an error
 * occurs that requires the cached contents of an inode to be written back or
 * destroyed
 * - Called if either PG_private or fscache is set on the page
 * - Caller holds page lock
 * - Return 0 if successful, -error otherwise
 */
static int nfs_launder_page(struct page *page)
{
        struct inode *inode = page_file_mapping(page)->host;

        dfprintk(PAGECACHE, "NFS: launder_page(%ld, %llu)\n",
                inode->i_ino, (long long)page_offset(page));

        wait_on_page_fscache(page);
        return nfs_wb_page(inode, page);
}

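/*
 * Prepare a file for use as swap: refuse files with holes, report the
 * swap extent size, and switch the RPC transport into swap mode.
 */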
static int nfs_swap_activate(struct swap_info_struct *sis, struct file *file,
                                                sector_t *span)
{
        unsigned long blocks;
        long long isize;
        struct rpc_clnt *clnt = NFS_CLIENT(file->f_mapping->host);
        struct inode *inode = file->f_mapping->host;

        spin_lock(&inode->i_lock);
        blocks = inode->i_blocks;
        isize = inode->i_size;
        spin_unlock(&inode->i_lock);
        if (blocks*512 < isize) {
                pr_warn("swap activate: swapfile has holes\n");
                return -EINVAL;
        }

        *span = sis->pages;

        return rpc_clnt_swap_activate(clnt);
}

static void nfs_swap_deactivate(struct file *file)
{
        struct rpc_clnt *clnt = NFS_CLIENT(file->f_mapping->host);

        rpc_clnt_swap_deactivate(clnt);
}

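/*
 * Address space operations for NFS regular files.
 */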
const struct address_space_operations nfs_file_aops = {
        .readpage = nfs_readpage,
        .readpages = nfs_readpages,
        .set_page_dirty = __set_page_dirty_nobuffers,
        .writepage = nfs_writepage,
        .writepages = nfs_writepages,
        .write_begin = nfs_write_begin,
        .write_end = nfs_write_end,
        .invalidatepage = nfs_invalidate_page,
        .releasepage = nfs_release_page,
        .direct_IO = nfs_direct_IO,
#ifdef CONFIG_MIGRATION
        .migratepage = nfs_migrate_page,
#endif
        .launder_page = nfs_launder_page,
        .is_dirty_writeback = nfs_check_dirty_writeback,
        .error_remove_page = generic_error_remove_page,
        .swap_activate = nfs_swap_activate,
        .swap_deactivate = nfs_swap_deactivate,
};

/*
 * Notification that a PTE pointing to an NFS page is about to be made
 * writable, implying that someone is about to modify the page through a
 * shared-writable mapping
 */
static vm_fault_t nfs_vm_page_mkwrite(struct vm_fault *vmf)
{
        struct page *page = vmf->page;
        struct file *filp = vmf->vma->vm_file;
        struct inode *inode = file_inode(filp);
        unsigned pagelen;
        vm_fault_t ret = VM_FAULT_NOPAGE;
        struct address_space *mapping;

        dfprintk(PAGECACHE, "NFS: vm_page_mkwrite(%pD2(%lu), offset %lld)\n",
                filp, filp->f_mapping->host->i_ino,
                (long long)page_offset(page));

        sb_start_pagefault(inode->i_sb);

        /* make sure the cache has finished storing the page */
        if (PageFsCache(page) &&
            wait_on_page_fscache_killable(vmf->page) < 0) {
                ret = VM_FAULT_RETRY;
                goto out;
        }

        wait_on_bit_action(&NFS_I(inode)->flags, NFS_INO_INVALIDATING,
                        nfs_wait_bit_killable, TASK_KILLABLE);

        lock_page(page);
        mapping = page_file_mapping(page);
        if (mapping != inode->i_mapping)
                goto out_unlock;

        wait_on_page_writeback(page);

        pagelen = nfs_page_length(page);
        if (pagelen == 0)
                goto out_unlock;

        ret = VM_FAULT_LOCKED;
        if (nfs_flush_incompatible(filp, page) == 0 &&
            nfs_updatepage(filp, page, 0, pagelen) == 0)
                goto out;

        ret = VM_FAULT_SIGBUS;
out_unlock:
        unlock_page(page);
out:
        sb_end_pagefault(inode->i_sb);
        return ret;
}

static const struct vm_operations_struct nfs_file_vm_ops = {
        .fault = filemap_fault,
        .map_pages = filemap_map_pages,
        .page_mkwrite = nfs_vm_page_mkwrite,
};

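/*
 * Decide whether a write error (or an expiring credential key) means
 * we must flush and recheck outstanding writes before returning.
 */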
static int nfs_need_check_write(struct file *filp, struct inode *inode,
                                int error)
{
        struct nfs_open_context *ctx;

        ctx = nfs_file_open_context(filp);
        if (nfs_error_is_fatal_on_server(error) ||
            nfs_ctx_key_to_expire(ctx, inode))
                return 1;
        return 0;
}

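/*
 * Buffered write. O_DIRECT writes bypass the page cache; otherwise
 * perform a generic page cache write, honouring the NFS_MOUNT_WRITE_EAGER
 * and NFS_MOUNT_WRITE_WAIT mount flags, and report any writeback errors
 * raised since the write started.
 */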
ssize_t nfs_file_write(struct kiocb *iocb, struct iov_iter *from)
{
        struct file *file = iocb->ki_filp;
        struct inode *inode = file_inode(file);
        unsigned int mntflags = NFS_SERVER(inode)->flags;
        ssize_t result, written;
        errseq_t since;
        int error;

        result = nfs_key_timeout_notify(file, inode);
        if (result)
                return result;

        if (iocb->ki_flags & IOCB_DIRECT)
                return nfs_file_direct_write(iocb, from);

        dprintk("NFS: write(%pD2, %zu@%Ld)\n",
                file, iov_iter_count(from), (long long) iocb->ki_pos);

        if (IS_SWAPFILE(inode))
                goto out_swapfile;
        /*
         * O_APPEND implies that we must revalidate the file length.
         */
        if (iocb->ki_flags & IOCB_APPEND || iocb->ki_pos > i_size_read(inode)) {
                result = nfs_revalidate_file_size(inode, file);
                if (result)
                        goto out;
        }

        nfs_clear_invalid_mapping(file->f_mapping);

        since = filemap_sample_wb_err(file->f_mapping);
        nfs_start_io_write(inode);
        result = generic_write_checks(iocb, from);
        if (result > 0) {
                current->backing_dev_info = inode_to_bdi(inode);
                result = generic_perform_write(file, from, iocb->ki_pos);
                current->backing_dev_info = NULL;
        }
        nfs_end_io_write(inode);
        if (result <= 0)
                goto out;

        written = result;
        iocb->ki_pos += written;

        if (mntflags & NFS_MOUNT_WRITE_EAGER) {
                result = filemap_fdatawrite_range(file->f_mapping,
                                        iocb->ki_pos - written,
                                        iocb->ki_pos - 1);
                if (result < 0)
                        goto out;
        }
        if (mntflags & NFS_MOUNT_WRITE_WAIT) {
                result = filemap_fdatawait_range(file->f_mapping,
                                        iocb->ki_pos - written,
                                        iocb->ki_pos - 1);
                if (result < 0)
                        goto out;
        }
        result = generic_write_sync(iocb, written);
        if (result < 0)
                goto out;

        /* Return error values */
        error = filemap_check_wb_err(file->f_mapping, since);
        if (nfs_need_check_write(file, inode, error)) {
                int err = nfs_wb_all(inode);
                if (err < 0)
                        result = err;
        }
        nfs_add_stats(inode, NFSIOS_NORMALWRITTENBYTES, written);
out:
        return result;

out_swapfile:
        printk(KERN_INFO "NFS: attempt to write to active swap file!\n");
        return -ETXTBSY;
}
EXPORT_SYMBOL_GPL(nfs_file_write);

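/*
 * Test for a conflicting lock: try a local posix_test_lock() first,
 * then ask the server unless a delegation or a local-locking mount
 * makes that unnecessary.
 */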
static int
do_getlk(struct file *filp, int cmd, struct file_lock *fl, int is_local)
{
        struct inode *inode = filp->f_mapping->host;
        int status = 0;
        unsigned int saved_type = fl->fl_type;

        /* Try local locking first */
        posix_test_lock(filp, fl);
        if (fl->fl_type != F_UNLCK) {
                /* found a conflict */
                goto out;
        }
        fl->fl_type = saved_type;

        if (NFS_PROTO(inode)->have_delegation(inode, FMODE_READ))
                goto out_noconflict;

        if (is_local)
                goto out_noconflict;

        status = NFS_PROTO(inode)->lock(filp, cmd, fl);
out:
        return status;
out_noconflict:
        fl->fl_type = F_UNLCK;
        goto out;
}

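/*
 * Release a lock: flush pending writes, wait for outstanding I/O on
 * the lock context, then unlock on the server or locally depending
 * on the mount options.
 */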
static int
do_unlk(struct file *filp, int cmd, struct file_lock *fl, int is_local)
{
        struct inode *inode = filp->f_mapping->host;
        struct nfs_lock_context *l_ctx;
        int status;

        /*
         * Flush all pending writes before doing anything
         * with locks..
         */
        nfs_wb_all(inode);

        l_ctx = nfs_get_lock_context(nfs_file_open_context(filp));
        if (!IS_ERR(l_ctx)) {
                status = nfs_iocounter_wait(l_ctx);
                nfs_put_lock_context(l_ctx);
                /* NOTE: special case
                 *      If we're signalled while cleaning up locks on process exit, we
                 *      still need to complete the unlock.
                 */
                if (status < 0 && !(fl->fl_flags & FL_CLOSE))
                        return status;
        }

        /*
         * Use local locking if mounted with "-onolock" or with appropriate
         * "-olocal_lock="
         */
        if (!is_local)
                status = NFS_PROTO(inode)->lock(filp, cmd, fl);
        else
                status = locks_lock_file_wait(filp, fl);
        return status;
}

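/*
 * Acquire a lock: flush pending writes first, take the lock on the
 * server or locally, then invalidate cached data so that locking
 * acts as a cache coherency point.
 */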
static int
do_setlk(struct file *filp, int cmd, struct file_lock *fl, int is_local)
{
        struct inode *inode = filp->f_mapping->host;
        int status;

        /*
         * Flush all pending writes before doing anything
         * with locks..
         */
        status = nfs_sync_mapping(filp->f_mapping);
        if (status != 0)
                goto out;

        /*
         * Use local locking if mounted with "-onolock" or with appropriate
         * "-olocal_lock="
         */
        if (!is_local)
                status = NFS_PROTO(inode)->lock(filp, cmd, fl);
        else
                status = locks_lock_file_wait(filp, fl);
        if (status < 0)
                goto out;

        /*
         * Invalidate cache to prevent missing any changes.  If
         * the file is mapped, clear the page cache as well so
         * those mappings will be loaded.
         *
         * This makes locking act as a cache coherency point.
         */
        nfs_sync_mapping(filp->f_mapping);
        if (!NFS_PROTO(inode)->have_delegation(inode, FMODE_READ)) {
                nfs_zap_caches(inode);
                if (mapping_mapped(filp->f_mapping))
                        nfs_revalidate_mapping(inode, filp->f_mapping);
        }
out:
        return status;
}

/*
 * Lock a (portion of) a file
 */
int nfs_lock(struct file *filp, int cmd, struct file_lock *fl)
{
        struct inode *inode = filp->f_mapping->host;
        int ret = -ENOLCK;
        int is_local = 0;

        dprintk("NFS: lock(%pD2, t=%x, fl=%x, r=%lld:%lld)\n",
                        filp, fl->fl_type, fl->fl_flags,
                        (long long)fl->fl_start, (long long)fl->fl_end);

        nfs_inc_stats(inode, NFSIOS_VFSLOCK);

        if (fl->fl_flags & FL_RECLAIM)
                return -ENOGRACE;

        if (NFS_SERVER(inode)->flags & NFS_MOUNT_LOCAL_FCNTL)
                is_local = 1;

        if (NFS_PROTO(inode)->lock_check_bounds != NULL) {
                ret = NFS_PROTO(inode)->lock_check_bounds(fl);
                if (ret < 0)
                        goto out_err;
        }

        if (IS_GETLK(cmd))
                ret = do_getlk(filp, cmd, fl, is_local);
        else if (fl->fl_type == F_UNLCK)
                ret = do_unlk(filp, cmd, fl, is_local);
        else
                ret = do_setlk(filp, cmd, fl, is_local);
out_err:
        return ret;
}
EXPORT_SYMBOL_GPL(nfs_lock);

/*
 * Lock a (portion of) a file
 */
int nfs_flock(struct file *filp, int cmd, struct file_lock *fl)
{
        struct inode *inode = filp->f_mapping->host;
        int is_local = 0;

        dprintk("NFS: flock(%pD2, t=%x, fl=%x)\n",
                        filp, fl->fl_type, fl->fl_flags);

        if (!(fl->fl_flags & FL_FLOCK))
                return -ENOLCK;

        if (NFS_SERVER(inode)->flags & NFS_MOUNT_LOCAL_FLOCK)
                is_local = 1;

        /* We're simulating flock() locks using posix locks on the server */
        if (fl->fl_type == F_UNLCK)
                return do_unlk(filp, cmd, fl, is_local);
        return do_setlk(filp, cmd, fl, is_local);
}
EXPORT_SYMBOL_GPL(nfs_flock);

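/*
 * File operations for NFS regular files.
 */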
const struct file_operations nfs_file_operations = {
        .llseek         = nfs_file_llseek,
        .read_iter      = nfs_file_read,
        .write_iter     = nfs_file_write,
        .mmap           = nfs_file_mmap,
        .open           = nfs_file_open,
        .flush          = nfs_file_flush,
        .release        = nfs_file_release,
        .fsync          = nfs_file_fsync,
        .lock           = nfs_lock,
        .flock          = nfs_flock,
        .splice_read    = generic_file_splice_read,
        .splice_write   = iter_file_splice_write,
        .check_flags    = nfs_check_flags,
        .setlease       = simple_nosetlease,
};
EXPORT_SYMBOL_GPL(nfs_file_operations);