// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_inode_item.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_dir2.h"
#include "xfs_dir2_priv.h"
#include "xfs_ioctl.h"
#include "xfs_trace.h"
#include "xfs_log.h"
#include "xfs_icache.h"
#include "xfs_pnfs.h"
#include "xfs_iomap.h"
#include "xfs_reflink.h"

#include <linux/falloc.h>
#include <linux/backing-dev.h>
#include <linux/mman.h>
#include <linux/fadvise.h>

static const struct vm_operations_struct xfs_file_vm_ops;

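/*
 * Set or clear the XFS_DIFLAG_PREALLOC flag on the inode in its own
 * transaction.  Unless XFS_PREALLOC_INVISIBLE is set, this also strips the
 * setuid/setgid bits and bumps the timestamps, matching what a regular
 * write would do.
 */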
int
xfs_update_prealloc_flags(
	struct xfs_inode	*ip,
	enum xfs_prealloc_flags	flags)
{
	struct xfs_trans	*tp;
	int			error;

	error = xfs_trans_alloc(ip->i_mount, &M_RES(ip->i_mount)->tr_writeid,
			0, 0, 0, &tp);
	if (error)
		return error;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);

	if (!(flags & XFS_PREALLOC_INVISIBLE)) {
		VFS_I(ip)->i_mode &= ~S_ISUID;
		if (VFS_I(ip)->i_mode & S_IXGRP)
			VFS_I(ip)->i_mode &= ~S_ISGID;
		xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
	}

	if (flags & XFS_PREALLOC_SET)
		ip->i_d.di_flags |= XFS_DIFLAG_PREALLOC;
	if (flags & XFS_PREALLOC_CLEAR)
		ip->i_d.di_flags &= ~XFS_DIFLAG_PREALLOC;

	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
	if (flags & XFS_PREALLOC_SYNC)
		xfs_trans_set_sync(tp);
	return xfs_trans_commit(tp);
}

/*
 * Fsync operations on directories are much simpler than on regular files,
 * as there is no file data to flush, and thus also no need for explicit
 * cache flush operations, and there are no non-transaction metadata updates
 * on directories either.
 */
STATIC int
xfs_dir_fsync(
	struct file		*file,
	loff_t			start,
	loff_t			end,
	int			datasync)
{
	struct xfs_inode	*ip = XFS_I(file->f_mapping->host);
	struct xfs_mount	*mp = ip->i_mount;
	xfs_lsn_t		lsn = 0;

	trace_xfs_dir_fsync(ip);

	xfs_ilock(ip, XFS_ILOCK_SHARED);
	if (xfs_ipincount(ip))
		lsn = ip->i_itemp->ili_last_lsn;
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	if (!lsn)
		return 0;
	return xfs_log_force_lsn(mp, lsn, XFS_LOG_SYNC, NULL);
}

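/*
 * Regular file fsync: write out and wait on the data, flush the relevant
 * device write caches, then force the log up to the last LSN that modified
 * the inode.
 */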
STATIC int
xfs_file_fsync(
	struct file		*file,
	loff_t			start,
	loff_t			end,
	int			datasync)
{
	struct inode		*inode = file->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	int			error = 0;
	int			log_flushed = 0;
	xfs_lsn_t		lsn = 0;

	trace_xfs_file_fsync(ip);

	error = file_write_and_wait_range(file, start, end);
	if (error)
		return error;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	xfs_iflags_clear(ip, XFS_ITRUNCATED);

	/*
	 * If we have an RT and/or log subvolume we need to make sure to flush
	 * the write cache of the device used for file data first.  This is to
	 * ensure newly written file data makes it to disk before logging the
	 * new inode size in case of an extending write.
	 */
	if (XFS_IS_REALTIME_INODE(ip))
		xfs_blkdev_issue_flush(mp->m_rtdev_targp);
	else if (mp->m_logdev_targp != mp->m_ddev_targp)
		xfs_blkdev_issue_flush(mp->m_ddev_targp);

	/*
	 * All metadata updates are logged, which means that we just have to
	 * flush the log up to the latest LSN that touched the inode. If we have
	 * concurrent fsync/fdatasync() calls, we need them to all block on the
	 * log force before we clear the ili_fsync_fields field. This ensures
	 * that we don't get a racing sync operation that does not wait for the
	 * metadata to hit the journal before returning.  If we race with
	 * clearing ili_fsync_fields, then all that will happen is the log force
	 * will do nothing as the lsn will already be on disk.  We can't race
	 * with setting ili_fsync_fields because that is done under
	 * XFS_ILOCK_EXCL, and that can't happen because we hold the lock shared
	 * until after ili_fsync_fields is cleared.
	 */
	xfs_ilock(ip, XFS_ILOCK_SHARED);
	if (xfs_ipincount(ip)) {
		if (!datasync ||
		    (ip->i_itemp->ili_fsync_fields & ~XFS_ILOG_TIMESTAMP))
			lsn = ip->i_itemp->ili_last_lsn;
	}

	if (lsn) {
		error = xfs_log_force_lsn(mp, lsn, XFS_LOG_SYNC, &log_flushed);
		ip->i_itemp->ili_fsync_fields = 0;
	}
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	/*
	 * If we only have a single device, and the log force above was
	 * a no-op we might have to flush the data device cache here.
	 * This can only happen for fdatasync/O_DSYNC if we were overwriting
	 * an already allocated file and thus do not have any metadata to
	 * commit.
	 */
	if (!log_flushed && !XFS_IS_REALTIME_INODE(ip) &&
	    mp->m_logdev_targp == mp->m_ddev_targp)
		xfs_blkdev_issue_flush(mp->m_ddev_targp);

	return error;
}

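/*
 * Direct I/O reads: issued under the shared iolock via iomap_dio_rw() so
 * they can run concurrently with other direct I/O to the same file.
 */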
STATIC ssize_t
xfs_file_dio_aio_read(
	struct kiocb		*iocb,
	struct iov_iter		*to)
{
	struct xfs_inode	*ip = XFS_I(file_inode(iocb->ki_filp));
	size_t			count = iov_iter_count(to);
	ssize_t			ret;

	trace_xfs_file_direct_read(ip, count, iocb->ki_pos);

	if (!count)
		return 0; /* skip atime */

	file_accessed(iocb->ki_filp);

	xfs_ilock(ip, XFS_IOLOCK_SHARED);
	ret = iomap_dio_rw(iocb, to, &xfs_read_iomap_ops, NULL,
			is_sync_kiocb(iocb));
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);

	return ret;
}

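/*
 * DAX reads bypass the page cache entirely and copy straight from the
 * backing device via dax_iomap_rw(), honouring IOCB_NOWAIT on the iolock.
 */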
static noinline ssize_t
xfs_file_dax_read(
	struct kiocb		*iocb,
	struct iov_iter		*to)
{
	struct xfs_inode	*ip = XFS_I(iocb->ki_filp->f_mapping->host);
	size_t			count = iov_iter_count(to);
	ssize_t			ret = 0;

	trace_xfs_file_dax_read(ip, count, iocb->ki_pos);

	if (!count)
		return 0; /* skip atime */

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (!xfs_ilock_nowait(ip, XFS_IOLOCK_SHARED))
			return -EAGAIN;
	} else {
		xfs_ilock(ip, XFS_IOLOCK_SHARED);
	}

	ret = dax_iomap_rw(iocb, to, &xfs_read_iomap_ops);
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);

	file_accessed(iocb->ki_filp);
	return ret;
}

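/*
 * Buffered reads go through the generic page cache path; the shared iolock
 * serialises against operations that take it exclusively, such as buffered
 * writes and fallocate.
 */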
STATIC ssize_t
xfs_file_buffered_aio_read(
	struct kiocb		*iocb,
	struct iov_iter		*to)
{
	struct xfs_inode	*ip = XFS_I(file_inode(iocb->ki_filp));
	ssize_t			ret;

	trace_xfs_file_buffered_read(ip, iov_iter_count(to), iocb->ki_pos);

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (!xfs_ilock_nowait(ip, XFS_IOLOCK_SHARED))
			return -EAGAIN;
	} else {
		xfs_ilock(ip, XFS_IOLOCK_SHARED);
	}
	ret = generic_file_read_iter(iocb, to);
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);

	return ret;
}

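/*
 * Top-level ->read_iter method: dispatch to the DAX, direct or buffered
 * read path and account the bytes read.
 */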
STATIC ssize_t
xfs_file_read_iter(
	struct kiocb		*iocb,
	struct iov_iter		*to)
{
	struct inode		*inode = file_inode(iocb->ki_filp);
	struct xfs_mount	*mp = XFS_I(inode)->i_mount;
	ssize_t			ret = 0;

	XFS_STATS_INC(mp, xs_read_calls);

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	if (IS_DAX(inode))
		ret = xfs_file_dax_read(iocb, to);
	else if (iocb->ki_flags & IOCB_DIRECT)
		ret = xfs_file_dio_aio_read(iocb, to);
	else
		ret = xfs_file_buffered_aio_read(iocb, to);

	if (ret > 0)
		XFS_STATS_ADD(mp, xs_read_bytes, ret);
	return ret;
}

/*
 * Common pre-write limit and setup checks.
 *
 * Called with the iolock held either shared or exclusive according to
 * @iolock, and returns with it held.  Might upgrade the iolock to exclusive
 * if called for a direct write beyond i_size.
 */
STATIC ssize_t
xfs_file_aio_write_checks(
	struct kiocb		*iocb,
	struct iov_iter		*from,
	int			*iolock)
{
	struct file		*file = iocb->ki_filp;
	struct inode		*inode = file->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	ssize_t			error = 0;
	size_t			count = iov_iter_count(from);
	bool			drained_dio = false;
	loff_t			isize;

restart:
	error = generic_write_checks(iocb, from);
	if (error <= 0)
		return error;

	error = xfs_break_layouts(inode, iolock, BREAK_WRITE);
	if (error)
		return error;

	/*
	 * For changing security info in file_remove_privs() we need i_rwsem
	 * exclusively.
	 */
	if (*iolock == XFS_IOLOCK_SHARED && !IS_NOSEC(inode)) {
		xfs_iunlock(ip, *iolock);
		*iolock = XFS_IOLOCK_EXCL;
		xfs_ilock(ip, *iolock);
		goto restart;
	}
	/*
	 * If the offset is beyond the size of the file, we need to zero any
	 * blocks that fall between the existing EOF and the start of this
	 * write.  If zeroing is needed and we are currently holding the
	 * iolock shared, we need to update it to exclusive which implies
	 * having to redo all checks before.
	 *
	 * We need to serialise against EOF updates that occur in IO
	 * completions here. We want to make sure that nobody is changing the
	 * size while we do this check until we have placed an IO barrier (i.e.
	 * hold the XFS_IOLOCK_EXCL) that prevents new IO from being dispatched.
	 * The spinlock effectively forms a memory barrier once we have the
	 * XFS_IOLOCK_EXCL so we are guaranteed to see the latest EOF value
	 * and hence be able to correctly determine if we need to run zeroing.
	 */
	spin_lock(&ip->i_flags_lock);
	isize = i_size_read(inode);
	if (iocb->ki_pos > isize) {
		spin_unlock(&ip->i_flags_lock);
		if (!drained_dio) {
			if (*iolock == XFS_IOLOCK_SHARED) {
				xfs_iunlock(ip, *iolock);
				*iolock = XFS_IOLOCK_EXCL;
				xfs_ilock(ip, *iolock);
				iov_iter_reexpand(from, count);
			}
			/*
			 * We now have an IO submission barrier in place, but
			 * AIO can do EOF updates during IO completion and hence
			 * we now need to wait for all of them to drain. Non-AIO
			 * DIO will have drained before we are given the
			 * XFS_IOLOCK_EXCL, and so for most cases this wait is a
			 * no-op.
			 */
			inode_dio_wait(inode);
			drained_dio = true;
			goto restart;
		}

		trace_xfs_zero_eof(ip, isize, iocb->ki_pos - isize);
		error = iomap_zero_range(inode, isize, iocb->ki_pos - isize,
				NULL, &xfs_buffered_write_iomap_ops);
		if (error)
			return error;
	} else
		spin_unlock(&ip->i_flags_lock);

	/*
	 * Updating the timestamps will grab the ilock again from
	 * xfs_fs_dirty_inode, so we have to call it after dropping the
	 * lock above.  Eventually we should look into a way to avoid
	 * the pointless lock roundtrip.
	 */
	return file_modified(file);
}

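/*
 * Direct I/O write completion: end any copy-on-write remapping, convert
 * unwritten extents, and update the in-core and on-disk file size for
 * extending writes.  Runs under a NOFS allocation context to avoid memory
 * reclaim deadlocks.
 */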
static int
xfs_dio_write_end_io(
	struct kiocb		*iocb,
	ssize_t			size,
	int			error,
	unsigned		flags)
{
	struct inode		*inode = file_inode(iocb->ki_filp);
	struct xfs_inode	*ip = XFS_I(inode);
	loff_t			offset = iocb->ki_pos;
	unsigned int		nofs_flag;

	trace_xfs_end_io_direct_write(ip, offset, size);

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return -EIO;

	if (error)
		return error;
	if (!size)
		return 0;

	/*
	 * Capture amount written on completion as we can't reliably account
	 * for it on submission.
	 */
	XFS_STATS_ADD(ip->i_mount, xs_write_bytes, size);

	/*
	 * We can allocate memory here while doing writeback on behalf of
	 * memory reclaim.  To avoid memory allocation deadlocks set the
	 * task-wide nofs context for the following operations.
	 */
	nofs_flag = memalloc_nofs_save();

	if (flags & IOMAP_DIO_COW) {
		error = xfs_reflink_end_cow(ip, offset, size);
		if (error)
			goto out;
	}

	/*
	 * Unwritten conversion updates the in-core isize after extent
	 * conversion but before updating the on-disk size. Updating isize any
	 * earlier allows a racing dio read to find unwritten extents before
	 * they are converted.
	 */
	if (flags & IOMAP_DIO_UNWRITTEN) {
		error = xfs_iomap_write_unwritten(ip, offset, size, true);
		goto out;
	}

	/*
	 * We need to update the in-core inode size here so that we don't end up
	 * with the on-disk inode size being outside the in-core inode size. We
	 * have no other method of updating EOF for AIO, so always do it here
	 * if necessary.
	 *
	 * We need to lock the test/set EOF update as we can be racing with
	 * other IO completions here to update the EOF. Failing to serialise
	 * here can result in EOF moving backwards and Bad Things Happen when
	 * that occurs.
	 */
	spin_lock(&ip->i_flags_lock);
	if (offset + size > i_size_read(inode)) {
		i_size_write(inode, offset + size);
		spin_unlock(&ip->i_flags_lock);
		error = xfs_setfilesize(ip, offset, size);
	} else {
		spin_unlock(&ip->i_flags_lock);
	}

out:
	memalloc_nofs_restore(nofs_flag);
	return error;
}

static const struct iomap_dio_ops xfs_dio_write_ops = {
	.end_io		= xfs_dio_write_end_io,
};

/*
 * xfs_file_dio_aio_write - handle direct IO writes
 *
 * Lock the inode appropriately to prepare for and issue a direct IO write.
 * By separating it from the buffered write path we remove all the
 * tricky-to-follow locking changes and looping.
 *
 * If there are cached pages or we're extending the file, we need IOLOCK_EXCL
 * until we're sure the bytes at the new EOF have been zeroed and/or the cached
 * pages are flushed out.
 *
 * In most cases the direct IO writes will be done holding IOLOCK_SHARED
 * allowing them to be done in parallel with reads and other direct IO writes.
 * However, if the IO is not aligned to filesystem blocks, the direct IO layer
 * needs to do sub-block zeroing and that requires serialisation against other
 * direct IOs to the same block. In this case we need to serialise the
 * submission of the unaligned IOs so that we don't get racing block zeroing in
 * the dio layer.  To avoid the problem with aio, we also need to wait for
 * outstanding IOs to complete so that unwritten extent conversion is completed
 * before we try to map the overlapping block. This is currently implemented by
 * hitting it with a big hammer (i.e. inode_dio_wait()).
 *
 * Returns with locks held indicated by @iolock and errors indicated by
 * negative return values.
 */
STATIC ssize_t
xfs_file_dio_aio_write(
	struct kiocb		*iocb,
	struct iov_iter		*from)
{
	struct file		*file = iocb->ki_filp;
	struct address_space	*mapping = file->f_mapping;
	struct inode		*inode = mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	ssize_t			ret = 0;
	int			unaligned_io = 0;
	int			iolock;
	size_t			count = iov_iter_count(from);
	struct xfs_buftarg	*target = XFS_IS_REALTIME_INODE(ip) ?
					mp->m_rtdev_targp : mp->m_ddev_targp;

	/* DIO must be aligned to device logical sector size */
	if ((iocb->ki_pos | count) & target->bt_logical_sectormask)
		return -EINVAL;

	/*
	 * Don't take the exclusive iolock here unless the I/O is unaligned to
	 * the file system block size.  We don't need to consider the EOF
	 * extension case here because xfs_file_aio_write_checks() will relock
	 * the inode as necessary for EOF zeroing cases and fill out the new
	 * inode size as appropriate.
	 */
	if ((iocb->ki_pos & mp->m_blockmask) ||
	    ((iocb->ki_pos + count) & mp->m_blockmask)) {
		unaligned_io = 1;

		/*
		 * We can't properly handle unaligned direct I/O to reflink
		 * files yet, as we can't unshare a partial block.
		 */
		if (xfs_is_cow_inode(ip)) {
			trace_xfs_reflink_bounce_dio_write(ip, iocb->ki_pos, count);
			return -EREMCHG;
		}
		iolock = XFS_IOLOCK_EXCL;
	} else {
		iolock = XFS_IOLOCK_SHARED;
	}

	if (iocb->ki_flags & IOCB_NOWAIT) {
		/* unaligned dio always waits, bail */
		if (unaligned_io)
			return -EAGAIN;
		if (!xfs_ilock_nowait(ip, iolock))
			return -EAGAIN;
	} else {
		xfs_ilock(ip, iolock);
	}

	ret = xfs_file_aio_write_checks(iocb, from, &iolock);
	if (ret)
		goto out;
	count = iov_iter_count(from);

	/*
	 * If we are doing unaligned IO, we can't allow any other overlapping IO
	 * in-flight at the same time or we risk data corruption. Wait for all
	 * other IO to drain before we submit. If the IO is aligned, demote the
	 * iolock if we had to take the exclusive lock in
	 * xfs_file_aio_write_checks() for other reasons.
	 */
	if (unaligned_io) {
		inode_dio_wait(inode);
	} else if (iolock == XFS_IOLOCK_EXCL) {
		xfs_ilock_demote(ip, XFS_IOLOCK_EXCL);
		iolock = XFS_IOLOCK_SHARED;
	}

	trace_xfs_file_direct_write(ip, count, iocb->ki_pos);
	/*
	 * If unaligned, this is the only IO in-flight. Wait on it before we
	 * release the iolock to prevent subsequent overlapping IO.
	 */
	ret = iomap_dio_rw(iocb, from, &xfs_direct_write_iomap_ops,
			   &xfs_dio_write_ops,
			   is_sync_kiocb(iocb) || unaligned_io);
out:
	xfs_iunlock(ip, iolock);

	/*
	 * No fallback to buffered IO on errors for XFS, direct IO will either
	 * complete fully or fail.
	 */
	ASSERT(ret < 0 || ret == count);
	return ret;
}

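/*
 * DAX writes always take the exclusive iolock and copy synchronously to the
 * backing device; an extending write updates the on-disk size right here,
 * as there is no separate I/O completion to do it.
 */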
static noinline ssize_t
xfs_file_dax_write(
	struct kiocb		*iocb,
	struct iov_iter		*from)
{
	struct inode		*inode = iocb->ki_filp->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	int			iolock = XFS_IOLOCK_EXCL;
	ssize_t			ret, error = 0;
	size_t			count;
	loff_t			pos;

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (!xfs_ilock_nowait(ip, iolock))
			return -EAGAIN;
	} else {
		xfs_ilock(ip, iolock);
	}

	ret = xfs_file_aio_write_checks(iocb, from, &iolock);
	if (ret)
		goto out;

	pos = iocb->ki_pos;
	count = iov_iter_count(from);

	trace_xfs_file_dax_write(ip, count, pos);
	ret = dax_iomap_rw(iocb, from, &xfs_direct_write_iomap_ops);
	if (ret > 0 && iocb->ki_pos > i_size_read(inode)) {
		i_size_write(inode, iocb->ki_pos);
		error = xfs_setfilesize(ip, pos, ret);
	}
out:
	xfs_iunlock(ip, iolock);
	if (error)
		return error;

	if (ret > 0) {
		XFS_STATS_ADD(ip->i_mount, xs_write_bytes, ret);

		/* Handle various SYNC-type writes */
		ret = generic_write_sync(iocb, ret);
	}
	return ret;
}

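/*
 * Buffered writes take the exclusive iolock.  On -EDQUOT or -ENOSPC we try
 * to free quota-charged or speculatively preallocated blocks (eofblocks,
 * cowblocks) and retry the write before giving up.
 */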
STATIC ssize_t
xfs_file_buffered_aio_write(
	struct kiocb		*iocb,
	struct iov_iter		*from)
{
	struct file		*file = iocb->ki_filp;
	struct address_space	*mapping = file->f_mapping;
	struct inode		*inode = mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	ssize_t			ret;
	int			enospc = 0;
	int			iolock;

	if (iocb->ki_flags & IOCB_NOWAIT)
		return -EOPNOTSUPP;

write_retry:
	iolock = XFS_IOLOCK_EXCL;
	xfs_ilock(ip, iolock);

	ret = xfs_file_aio_write_checks(iocb, from, &iolock);
	if (ret)
		goto out;

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = inode_to_bdi(inode);

	trace_xfs_file_buffered_write(ip, iov_iter_count(from), iocb->ki_pos);
	ret = iomap_file_buffered_write(iocb, from,
			&xfs_buffered_write_iomap_ops);
	if (likely(ret >= 0))
		iocb->ki_pos += ret;

	/*
	 * If we hit a space limit, try to free up some lingering preallocated
	 * space before returning an error. In the case of ENOSPC, first try to
	 * write back all dirty inodes to free up some of the excess reserved
	 * metadata space. This reduces the chances that the eofblocks scan
	 * waits on dirty mappings. Since xfs_flush_inodes() is serialized, this
	 * also behaves as a filter to prevent too many eofblocks scans from
	 * running at the same time.
	 */
	if (ret == -EDQUOT && !enospc) {
		xfs_iunlock(ip, iolock);
		enospc = xfs_inode_free_quota_eofblocks(ip);
		if (enospc)
			goto write_retry;
		enospc = xfs_inode_free_quota_cowblocks(ip);
		if (enospc)
			goto write_retry;
		iolock = 0;
	} else if (ret == -ENOSPC && !enospc) {
		struct xfs_eofblocks eofb = {0};

		enospc = 1;
		xfs_flush_inodes(ip->i_mount);

		xfs_iunlock(ip, iolock);
		eofb.eof_flags = XFS_EOF_FLAGS_SYNC;
		xfs_icache_free_eofblocks(ip->i_mount, &eofb);
		xfs_icache_free_cowblocks(ip->i_mount, &eofb);
		goto write_retry;
	}

	current->backing_dev_info = NULL;
out:
	if (iolock)
		xfs_iunlock(ip, iolock);

	if (ret > 0) {
		XFS_STATS_ADD(ip->i_mount, xs_write_bytes, ret);
		/* Handle various SYNC-type writes */
		ret = generic_write_sync(iocb, ret);
	}
	return ret;
}

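/*
 * Top-level ->write_iter method: dispatch to the DAX, direct or buffered
 * write path.  A direct write that returns -EREMCHG (unaligned I/O to a
 * reflink file) falls back to the buffered path.
 */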
STATIC ssize_t
xfs_file_write_iter(
	struct kiocb		*iocb,
	struct iov_iter		*from)
{
	struct file		*file = iocb->ki_filp;
	struct address_space	*mapping = file->f_mapping;
	struct inode		*inode = mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	ssize_t			ret;
	size_t			ocount = iov_iter_count(from);

	XFS_STATS_INC(ip->i_mount, xs_write_calls);

	if (ocount == 0)
		return 0;

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return -EIO;

	if (IS_DAX(inode))
		return xfs_file_dax_write(iocb, from);

	if (iocb->ki_flags & IOCB_DIRECT) {
		/*
		 * Allow a directio write to fall back to a buffered
		 * write *only* in the case that we're doing a reflink
		 * CoW.  In all other directio scenarios we do not
		 * allow an operation to fall back to buffered mode.
		 */
		ret = xfs_file_dio_aio_write(iocb, from);
		if (ret != -EREMCHG)
			return ret;
	}

	return xfs_file_buffered_aio_write(iocb, from);
}

static void
xfs_wait_dax_page(
	struct inode		*inode)
{
	struct xfs_inode	*ip = XFS_I(inode);

	xfs_iunlock(ip, XFS_MMAPLOCK_EXCL);
	schedule();
	xfs_ilock(ip, XFS_MMAPLOCK_EXCL);
}

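/*
 * Wait for any page still pinned by an in-flight DAX mapping to be
 * released, dropping and retaking the MMAPLOCK around the sleep.
 */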
static int
xfs_break_dax_layouts(
	struct inode		*inode,
	bool			*retry)
{
	struct page		*page;

	ASSERT(xfs_isilocked(XFS_I(inode), XFS_MMAPLOCK_EXCL));

	page = dax_layout_busy_page(inode->i_mapping);
	if (!page)
		return 0;

	*retry = true;
	return ___wait_var_event(&page->_refcount,
			atomic_read(&page->_refcount) == 1, TASK_INTERRUPTIBLE,
			0, 0, xfs_wait_dax_page(inode));
}

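/*
 * Break any layouts (pNFS leases, and for BREAK_UNMAP also busy DAX pages)
 * that would conflict with the caller's operation, retrying until none
 * remain.
 */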
int
xfs_break_layouts(
	struct inode		*inode,
	uint			*iolock,
	enum layout_break_reason reason)
{
	bool			retry;
	int			error;

	ASSERT(xfs_isilocked(XFS_I(inode), XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL));

	do {
		retry = false;
		switch (reason) {
		case BREAK_UNMAP:
			error = xfs_break_dax_layouts(inode, &retry);
			if (error || retry)
				break;
			/* fall through */
		case BREAK_WRITE:
			error = xfs_break_leased_layouts(inode, iolock, &retry);
			break;
		default:
			WARN_ON_ONCE(1);
			error = -EINVAL;
		}
	} while (error == 0 && retry);

	return error;
}

#define	XFS_FALLOC_FL_SUPPORTED						\
		(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |		\
		 FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE |	\
		 FALLOC_FL_INSERT_RANGE | FALLOC_FL_UNSHARE_RANGE)

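/*
 * ->fallocate method: implements hole punching, collapse/insert range,
 * zero range, unshare and preallocation on top of the XFS space
 * manipulation primitives, updating the file size where required.
 */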
STATIC long
xfs_file_fallocate(
	struct file		*file,
	int			mode,
	loff_t			offset,
	loff_t			len)
{
	struct inode		*inode = file_inode(file);
	struct xfs_inode	*ip = XFS_I(inode);
	long			error;
	enum xfs_prealloc_flags	flags = 0;
	uint			iolock = XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL;
	loff_t			new_size = 0;
	bool			do_file_insert = false;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;
	if (mode & ~XFS_FALLOC_FL_SUPPORTED)
		return -EOPNOTSUPP;

	xfs_ilock(ip, iolock);
	error = xfs_break_layouts(inode, &iolock, BREAK_UNMAP);
	if (error)
		goto out_unlock;

	if (mode & FALLOC_FL_PUNCH_HOLE) {
		error = xfs_free_file_space(ip, offset, len);
		if (error)
			goto out_unlock;
	} else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
		unsigned int blksize_mask = i_blocksize(inode) - 1;

		if (offset & blksize_mask || len & blksize_mask) {
			error = -EINVAL;
			goto out_unlock;
		}

		/*
		 * There is no need to overlap collapse range with EOF,
		 * in which case it is effectively a truncate operation
		 */
		if (offset + len >= i_size_read(inode)) {
			error = -EINVAL;
			goto out_unlock;
		}

		new_size = i_size_read(inode) - len;

		error = xfs_collapse_file_space(ip, offset, len);
		if (error)
			goto out_unlock;
	} else if (mode & FALLOC_FL_INSERT_RANGE) {
		unsigned int	blksize_mask = i_blocksize(inode) - 1;
		loff_t		isize = i_size_read(inode);

		if (offset & blksize_mask || len & blksize_mask) {
			error = -EINVAL;
			goto out_unlock;
		}

		/*
		 * New inode size must not exceed ->s_maxbytes, accounting for
		 * possible signed overflow.
		 */
		if (inode->i_sb->s_maxbytes - isize < len) {
			error = -EFBIG;
			goto out_unlock;
		}
		new_size = isize + len;

		/* Offset should be less than i_size */
		if (offset >= isize) {
			error = -EINVAL;
			goto out_unlock;
		}
		do_file_insert = true;
	} else {
		flags |= XFS_PREALLOC_SET;

		if (!(mode & FALLOC_FL_KEEP_SIZE) &&
		    offset + len > i_size_read(inode)) {
			new_size = offset + len;
			error = inode_newsize_ok(inode, new_size);
			if (error)
				goto out_unlock;
		}

		if (mode & FALLOC_FL_ZERO_RANGE) {
			error = xfs_zero_file_space(ip, offset, len);
		} else if (mode & FALLOC_FL_UNSHARE_RANGE) {
			error = xfs_reflink_unshare(ip, offset, len);
			if (error)
				goto out_unlock;

			if (!xfs_is_always_cow_inode(ip)) {
				error = xfs_alloc_file_space(ip, offset, len,
						XFS_BMAPI_PREALLOC);
			}
		} else {
			/*
			 * If always_cow mode we can't use preallocations and
			 * thus should not create them.
			 */
			if (xfs_is_always_cow_inode(ip)) {
				error = -EOPNOTSUPP;
				goto out_unlock;
			}

			error = xfs_alloc_file_space(ip, offset, len,
					XFS_BMAPI_PREALLOC);
		}
		if (error)
			goto out_unlock;
	}

	if (file->f_flags & O_DSYNC)
		flags |= XFS_PREALLOC_SYNC;

	error = xfs_update_prealloc_flags(ip, flags);
	if (error)
		goto out_unlock;

	/* Change file size if needed */
	if (new_size) {
		struct iattr iattr;

		iattr.ia_valid = ATTR_SIZE;
		iattr.ia_size = new_size;
		error = xfs_vn_setattr_size(file_dentry(file), &iattr);
		if (error)
			goto out_unlock;
	}

	/*
	 * Perform hole insertion now that the file size has been
	 * updated so that if we crash during the operation we don't
	 * leave shifted extents past EOF and hence lose access to
	 * the data that is contained within them.
	 */
	if (do_file_insert)
		error = xfs_insert_file_space(ip, offset, len);

out_unlock:
	xfs_iunlock(ip, iolock);
	return error;
}

STATIC int
xfs_file_fadvise(
	struct file	*file,
	loff_t		start,
	loff_t		end,
	int		advice)
{
	struct xfs_inode *ip = XFS_I(file_inode(file));
	int ret;
	int lockflags = 0;

	/*
	 * Operations creating pages in page cache need protection from hole
	 * punching and similar ops
	 */
	if (advice == POSIX_FADV_WILLNEED) {
		lockflags = XFS_IOLOCK_SHARED;
		xfs_ilock(ip, lockflags);
	}
	ret = generic_fadvise(file, start, end, advice);
	if (lockflags)
		xfs_iunlock(ip, lockflags);
	return ret;
}

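/*
 * Remap (reflink or dedupe) a range of blocks from one file to another,
 * carrying the CoW extent size hint across when the entire source file is
 * shared over the entire destination file.
 */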
STATIC loff_t
xfs_file_remap_range(
	struct file		*file_in,
	loff_t			pos_in,
	struct file		*file_out,
	loff_t			pos_out,
	loff_t			len,
	unsigned int		remap_flags)
{
	struct inode		*inode_in = file_inode(file_in);
	struct xfs_inode	*src = XFS_I(inode_in);
	struct inode		*inode_out = file_inode(file_out);
	struct xfs_inode	*dest = XFS_I(inode_out);
	struct xfs_mount	*mp = src->i_mount;
	loff_t			remapped = 0;
	xfs_extlen_t		cowextsize;
	int			ret;

	if (remap_flags & ~(REMAP_FILE_DEDUP | REMAP_FILE_ADVISORY))
		return -EINVAL;

	if (!xfs_sb_version_hasreflink(&mp->m_sb))
		return -EOPNOTSUPP;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	/* Prepare and then clone file data. */
	ret = xfs_reflink_remap_prep(file_in, pos_in, file_out, pos_out,
			&len, remap_flags);
	if (ret < 0 || len == 0)
		return ret;

	trace_xfs_reflink_remap_range(src, pos_in, len, dest, pos_out);

	ret = xfs_reflink_remap_blocks(src, pos_in, dest, pos_out, len,
			&remapped);
	if (ret)
		goto out_unlock;

	/*
	 * Carry the cowextsize hint from src to dest if we're sharing the
	 * entire source file to the entire destination file, the source file
	 * has a cowextsize hint, and the destination file does not.
	 */
	cowextsize = 0;
	if (pos_in == 0 && len == i_size_read(inode_in) &&
	    (src->i_d.di_flags2 & XFS_DIFLAG2_COWEXTSIZE) &&
	    pos_out == 0 && len >= i_size_read(inode_out) &&
	    !(dest->i_d.di_flags2 & XFS_DIFLAG2_COWEXTSIZE))
		cowextsize = src->i_d.di_cowextsize;

	ret = xfs_reflink_update_dest(dest, pos_out + len, cowextsize,
			remap_flags);

out_unlock:
	xfs_reflink_remap_unlock(file_in, file_out);
	if (ret)
		trace_xfs_reflink_remap_range_error(dest, ret, _RET_IP_);
	return remapped > 0 ? remapped : ret;
}

STATIC int
xfs_file_open(
	struct inode	*inode,
	struct file	*file)
{
	if (!(file->f_flags & O_LARGEFILE) && i_size_read(inode) > MAX_NON_LFS)
		return -EFBIG;
	if (XFS_FORCED_SHUTDOWN(XFS_M(inode->i_sb)))
		return -EIO;
	file->f_mode |= FMODE_NOWAIT;
	return 0;
}

STATIC int
xfs_dir_open(
	struct inode	*inode,
	struct file	*file)
{
	struct xfs_inode *ip = XFS_I(inode);
	int		mode;
	int		error;

	error = xfs_file_open(inode, file);
	if (error)
		return error;

	/*
	 * If there are any blocks, read-ahead block 0 as we're almost
	 * certain to have the next operation be a read there.
	 */
	mode = xfs_ilock_data_map_shared(ip);
	if (ip->i_d.di_nextents > 0)
		error = xfs_dir3_data_readahead(ip, 0, -1);
	xfs_iunlock(ip, mode);
	return error;
}

STATIC int
xfs_file_release(
	struct inode	*inode,
	struct file	*filp)
{
	return xfs_release(XFS_I(inode));
}

STATIC int
xfs_file_readdir(
	struct file	*file,
	struct dir_context *ctx)
{
	struct inode	*inode = file_inode(file);
	xfs_inode_t	*ip = XFS_I(inode);
	size_t		bufsize;

	/*
	 * The Linux API doesn't pass the total size of the buffer we read
	 * into down to the filesystem.  With the filldir concept it's not
	 * needed for correct information, but the XFS dir2 leaf code wants
	 * an estimate of the buffer size to calculate its readahead window
	 * and size the buffers used for mapping to physical blocks.
	 *
	 * Try to give it an estimate that's good enough, maybe at some
	 * point we can change the ->readdir prototype to include the
	 * buffer size.  For now we use the current glibc buffer size.
	 */
	bufsize = (size_t)min_t(loff_t, XFS_READDIR_BUFSIZE, ip->i_d.di_size);

	return xfs_readdir(NULL, ip, ctx, bufsize);
}

STATIC loff_t
xfs_file_llseek(
	struct file	*file,
	loff_t		offset,
	int		whence)
{
	struct inode		*inode = file->f_mapping->host;

	if (XFS_FORCED_SHUTDOWN(XFS_I(inode)->i_mount))
		return -EIO;

	switch (whence) {
	default:
		return generic_file_llseek(file, offset, whence);
	case SEEK_HOLE:
		offset = iomap_seek_hole(inode, offset, &xfs_seek_iomap_ops);
		break;
	case SEEK_DATA:
		offset = iomap_seek_data(inode, offset, &xfs_seek_iomap_ops);
		break;
	}

	if (offset < 0)
		return offset;
	return vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
}

/*
 * Locking for serialisation of IO during page faults. This results in a lock
 * ordering of:
 *
 * mmap_sem (MM)
 *   sb_start_pagefault(vfs, freeze)
 *     i_mmaplock (XFS - truncate serialisation)
 *       page_lock (MM)
 *         i_lock (XFS - extent map serialisation)
 */
static vm_fault_t
__xfs_filemap_fault(
	struct vm_fault		*vmf,
	enum page_entry_size	pe_size,
	bool			write_fault)
{
	struct inode		*inode = file_inode(vmf->vma->vm_file);
	struct xfs_inode	*ip = XFS_I(inode);
	vm_fault_t		ret;

	trace_xfs_filemap_fault(ip, pe_size, write_fault);

	if (write_fault) {
		sb_start_pagefault(inode->i_sb);
		file_update_time(vmf->vma->vm_file);
	}

	xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
	if (IS_DAX(inode)) {
		pfn_t pfn;

		ret = dax_iomap_fault(vmf, pe_size, &pfn, NULL,
				(write_fault && !vmf->cow_page) ?
				 &xfs_direct_write_iomap_ops :
				 &xfs_read_iomap_ops);
		if (ret & VM_FAULT_NEEDDSYNC)
			ret = dax_finish_sync_fault(vmf, pe_size, pfn);
	} else {
		if (write_fault)
			ret = iomap_page_mkwrite(vmf,
					&xfs_buffered_write_iomap_ops);
		else
			ret = filemap_fault(vmf);
	}
	xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);

	if (write_fault)
		sb_end_pagefault(inode->i_sb);
	return ret;
}

static vm_fault_t
xfs_filemap_fault(
	struct vm_fault		*vmf)
{
	/* DAX can shortcut the normal fault path on write faults! */
	return __xfs_filemap_fault(vmf, PE_SIZE_PTE,
			IS_DAX(file_inode(vmf->vma->vm_file)) &&
			(vmf->flags & FAULT_FLAG_WRITE));
}

static vm_fault_t
xfs_filemap_huge_fault(
	struct vm_fault		*vmf,
	enum page_entry_size	pe_size)
{
	if (!IS_DAX(file_inode(vmf->vma->vm_file)))
		return VM_FAULT_FALLBACK;

	/* DAX can shortcut the normal fault path on write faults! */
	return __xfs_filemap_fault(vmf, pe_size,
			(vmf->flags & FAULT_FLAG_WRITE));
}

static vm_fault_t
xfs_filemap_page_mkwrite(
	struct vm_fault		*vmf)
{
	return __xfs_filemap_fault(vmf, PE_SIZE_PTE, true);
}

/*
 * pfn_mkwrite was originally intended to ensure we capture time stamp updates
 * on write faults. In reality, it needs to serialise against truncate and
 * prepare memory for writing, so handle it as a standard write fault.
 */
static vm_fault_t
xfs_filemap_pfn_mkwrite(
	struct vm_fault		*vmf)
{
	return __xfs_filemap_fault(vmf, PE_SIZE_PTE, true);
}

static const struct vm_operations_struct xfs_file_vm_ops = {
	.fault		= xfs_filemap_fault,
	.huge_fault	= xfs_filemap_huge_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= xfs_filemap_page_mkwrite,
	.pfn_mkwrite	= xfs_filemap_pfn_mkwrite,
};

STATIC int
xfs_file_mmap(
	struct file	*filp,
	struct vm_area_struct *vma)
{
	struct dax_device	*dax_dev;

	dax_dev = xfs_find_daxdev_for_inode(file_inode(filp));
	/*
	 * We don't support synchronous mappings for non-DAX files, nor for
	 * DAX files if the underlying dax_device is not synchronous.
	 */
	if (!daxdev_mapping_supported(vma, dax_dev))
		return -EOPNOTSUPP;

	file_accessed(filp);
	vma->vm_ops = &xfs_file_vm_ops;
	if (IS_DAX(file_inode(filp)))
		vma->vm_flags |= VM_HUGEPAGE;
	return 0;
}

const struct file_operations xfs_file_operations = {
	.llseek		= xfs_file_llseek,
	.read_iter	= xfs_file_read_iter,
	.write_iter	= xfs_file_write_iter,
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
	.iopoll		= iomap_dio_iopoll,
	.unlocked_ioctl	= xfs_file_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= xfs_file_compat_ioctl,
#endif
	.mmap		= xfs_file_mmap,
	.mmap_supported_flags = MAP_SYNC,
	.open		= xfs_file_open,
	.release	= xfs_file_release,
	.fsync		= xfs_file_fsync,
	.get_unmapped_area = thp_get_unmapped_area,
	.fallocate	= xfs_file_fallocate,
	.fadvise	= xfs_file_fadvise,
	.remap_file_range = xfs_file_remap_range,
};

const struct file_operations xfs_dir_file_operations = {
	.open		= xfs_dir_open,
	.read		= generic_read_dir,
	.iterate_shared	= xfs_file_readdir,
	.llseek		= generic_file_llseek,
	.unlocked_ioctl	= xfs_file_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= xfs_file_compat_ioctl,
#endif
	.fsync		= xfs_dir_fsync,
};