// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_inode_item.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_error.h"
#include "xfs_dir2.h"
#include "xfs_dir2_priv.h"
#include "xfs_ioctl.h"
#include "xfs_trace.h"
#include "xfs_log.h"
#include "xfs_icache.h"
#include "xfs_pnfs.h"
#include "xfs_iomap.h"
#include "xfs_reflink.h"

#include <linux/dcache.h>
#include <linux/falloc.h>
#include <linux/pagevec.h>
#include <linux/backing-dev.h>
#include <linux/mman.h>

static const struct vm_operations_struct xfs_file_vm_ops;

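/*
 * Set or clear the preallocation flag on an inode in a single logged
 * transaction, stripping the setuid/setgid bits and bumping the
 * timestamps unless the update is marked invisible.
 */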
int
xfs_update_prealloc_flags(
	struct xfs_inode	*ip,
	enum xfs_prealloc_flags	flags)
{
	struct xfs_trans	*tp;
	int			error;

	error = xfs_trans_alloc(ip->i_mount, &M_RES(ip->i_mount)->tr_writeid,
			0, 0, 0, &tp);
	if (error)
		return error;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);

	if (!(flags & XFS_PREALLOC_INVISIBLE)) {
		VFS_I(ip)->i_mode &= ~S_ISUID;
		if (VFS_I(ip)->i_mode & S_IXGRP)
			VFS_I(ip)->i_mode &= ~S_ISGID;
		xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
	}

	if (flags & XFS_PREALLOC_SET)
		ip->i_d.di_flags |= XFS_DIFLAG_PREALLOC;
	if (flags & XFS_PREALLOC_CLEAR)
		ip->i_d.di_flags &= ~XFS_DIFLAG_PREALLOC;

	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
	if (flags & XFS_PREALLOC_SYNC)
		xfs_trans_set_sync(tp);
	return xfs_trans_commit(tp);
}

/*
 * Fsync operations on directories are much simpler than on regular files,
 * as there is no file data to flush, and thus also no need for explicit
 * cache flush operations, and there are no non-transaction metadata updates
 * on directories either.
 */
STATIC int
xfs_dir_fsync(
	struct file		*file,
	loff_t			start,
	loff_t			end,
	int			datasync)
{
	struct xfs_inode	*ip = XFS_I(file->f_mapping->host);
	struct xfs_mount	*mp = ip->i_mount;
	xfs_lsn_t		lsn = 0;

	trace_xfs_dir_fsync(ip);

	xfs_ilock(ip, XFS_ILOCK_SHARED);
	if (xfs_ipincount(ip))
		lsn = ip->i_itemp->ili_last_lsn;
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	if (!lsn)
		return 0;
	return xfs_log_force_lsn(mp, lsn, XFS_LOG_SYNC, NULL);
}

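/*
 * Flush everything for a regular file out to stable storage: write
 * back dirty data, flush the relevant device write caches, and force
 * the log up to the last LSN that touched the inode.
 */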
STATIC int
xfs_file_fsync(
	struct file		*file,
	loff_t			start,
	loff_t			end,
	int			datasync)
{
	struct inode		*inode = file->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	int			error = 0;
	int			log_flushed = 0;
	xfs_lsn_t		lsn = 0;

	trace_xfs_file_fsync(ip);

	error = file_write_and_wait_range(file, start, end);
	if (error)
		return error;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	xfs_iflags_clear(ip, XFS_ITRUNCATED);

	/*
	 * If we have an RT and/or log subvolume we need to make sure to flush
	 * the write cache of the device used for file data first.  This is to
	 * ensure newly written file data makes it to disk before logging the
	 * new inode size in case of an extending write.
	 */
	if (XFS_IS_REALTIME_INODE(ip))
		xfs_blkdev_issue_flush(mp->m_rtdev_targp);
	else if (mp->m_logdev_targp != mp->m_ddev_targp)
		xfs_blkdev_issue_flush(mp->m_ddev_targp);

	/*
	 * All metadata updates are logged, which means that we just have to
	 * flush the log up to the latest LSN that touched the inode. If we have
	 * concurrent fsync/fdatasync() calls, we need them to all block on the
	 * log force before we clear the ili_fsync_fields field. This ensures
	 * that we don't get a racing sync operation that does not wait for the
	 * metadata to hit the journal before returning. If we race with
	 * clearing the ili_fsync_fields, then all that will happen is the log
	 * force will do nothing as the lsn will already be on disk. We can't
	 * race with setting ili_fsync_fields because that is done under
	 * XFS_ILOCK_EXCL, and that can't happen because we hold the lock shared
	 * until after the ili_fsync_fields is cleared.
	 */
	xfs_ilock(ip, XFS_ILOCK_SHARED);
	if (xfs_ipincount(ip)) {
		if (!datasync ||
		    (ip->i_itemp->ili_fsync_fields & ~XFS_ILOG_TIMESTAMP))
			lsn = ip->i_itemp->ili_last_lsn;
	}

	if (lsn) {
		error = xfs_log_force_lsn(mp, lsn, XFS_LOG_SYNC, &log_flushed);
		ip->i_itemp->ili_fsync_fields = 0;
	}
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	/*
	 * If we only have a single device, and the log force above was
	 * a no-op we might have to flush the data device cache here.
	 * This can only happen for fdatasync/O_DSYNC if we were overwriting
	 * an already allocated file and thus do not have any metadata to
	 * commit.
	 */
	if (!log_flushed && !XFS_IS_REALTIME_INODE(ip) &&
	    mp->m_logdev_targp == mp->m_ddev_targp)
		xfs_blkdev_issue_flush(mp->m_ddev_targp);

	return error;
}

STATIC ssize_t
xfs_file_dio_aio_read(
	struct kiocb		*iocb,
	struct iov_iter		*to)
{
	struct xfs_inode	*ip = XFS_I(file_inode(iocb->ki_filp));
	size_t			count = iov_iter_count(to);
	ssize_t			ret;

	trace_xfs_file_direct_read(ip, count, iocb->ki_pos);

	if (!count)
		return 0; /* skip atime */

	file_accessed(iocb->ki_filp);

	xfs_ilock(ip, XFS_IOLOCK_SHARED);
	ret = iomap_dio_rw(iocb, to, &xfs_iomap_ops, NULL);
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);

	return ret;
}

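/*
 * DAX read: a synchronous copy from the backing store done under the
 * shared IOLOCK, honouring IOCB_NOWAIT by only trylocking.
 */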
static noinline ssize_t
xfs_file_dax_read(
	struct kiocb		*iocb,
	struct iov_iter		*to)
{
	struct xfs_inode	*ip = XFS_I(iocb->ki_filp->f_mapping->host);
	size_t			count = iov_iter_count(to);
	ssize_t			ret = 0;

	trace_xfs_file_dax_read(ip, count, iocb->ki_pos);

	if (!count)
		return 0; /* skip atime */

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (!xfs_ilock_nowait(ip, XFS_IOLOCK_SHARED))
			return -EAGAIN;
	} else {
		xfs_ilock(ip, XFS_IOLOCK_SHARED);
	}

	ret = dax_iomap_rw(iocb, to, &xfs_iomap_ops);
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);

	file_accessed(iocb->ki_filp);
	return ret;
}

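/* Buffered read through the page cache via generic_file_read_iter(). */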
STATIC ssize_t
xfs_file_buffered_aio_read(
	struct kiocb		*iocb,
	struct iov_iter		*to)
{
	struct xfs_inode	*ip = XFS_I(file_inode(iocb->ki_filp));
	ssize_t			ret;

	trace_xfs_file_buffered_read(ip, iov_iter_count(to), iocb->ki_pos);

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (!xfs_ilock_nowait(ip, XFS_IOLOCK_SHARED))
			return -EAGAIN;
	} else {
		xfs_ilock(ip, XFS_IOLOCK_SHARED);
	}
	ret = generic_file_read_iter(iocb, to);
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);

	return ret;
}

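/*
 * ->read_iter: dispatch to the DAX, direct or buffered read path and
 * account the bytes read against the filesystem stats.
 */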
STATIC ssize_t
xfs_file_read_iter(
	struct kiocb		*iocb,
	struct iov_iter		*to)
{
	struct inode		*inode = file_inode(iocb->ki_filp);
	struct xfs_mount	*mp = XFS_I(inode)->i_mount;
	ssize_t			ret = 0;

	XFS_STATS_INC(mp, xs_read_calls);

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	if (IS_DAX(inode))
		ret = xfs_file_dax_read(iocb, to);
	else if (iocb->ki_flags & IOCB_DIRECT)
		ret = xfs_file_dio_aio_read(iocb, to);
	else
		ret = xfs_file_buffered_aio_read(iocb, to);

	if (ret > 0)
		XFS_STATS_ADD(mp, xs_read_bytes, ret);
	return ret;
}

/*
 * Common pre-write limit and setup checks.
 *
 * Called with the iolock held either shared or exclusive according to
 * @iolock, and returns with it held.  Might upgrade the iolock to exclusive
 * if called for a direct write beyond i_size.
 */
STATIC ssize_t
xfs_file_aio_write_checks(
	struct kiocb		*iocb,
	struct iov_iter		*from,
	int			*iolock)
{
	struct file		*file = iocb->ki_filp;
	struct inode		*inode = file->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	ssize_t			error = 0;
	size_t			count = iov_iter_count(from);
	bool			drained_dio = false;
	loff_t			isize;

restart:
	error = generic_write_checks(iocb, from);
	if (error <= 0)
		return error;

	error = xfs_break_layouts(inode, iolock, BREAK_WRITE);
	if (error)
		return error;

	/*
	 * For changing security info in file_remove_privs() we need i_rwsem
	 * exclusively.
	 */
	if (*iolock == XFS_IOLOCK_SHARED && !IS_NOSEC(inode)) {
		xfs_iunlock(ip, *iolock);
		*iolock = XFS_IOLOCK_EXCL;
		xfs_ilock(ip, *iolock);
		goto restart;
	}
	/*
	 * If the offset is beyond the size of the file, we need to zero any
	 * blocks that fall between the existing EOF and the start of this
	 * write.  If zeroing is needed and we are currently holding the
	 * iolock shared, we need to update it to exclusive which implies
	 * having to redo all checks before.
	 *
	 * We need to serialise against EOF updates that occur in IO
	 * completions here. We want to make sure that nobody is changing the
	 * size while we do this check until we have placed an IO barrier (i.e.
	 * hold the XFS_IOLOCK_EXCL) that prevents new IO from being dispatched.
	 * The spinlock effectively forms a memory barrier once we have the
	 * XFS_IOLOCK_EXCL so we are guaranteed to see the latest EOF value
	 * and hence be able to correctly determine if we need to run zeroing.
	 */
	spin_lock(&ip->i_flags_lock);
	isize = i_size_read(inode);
	if (iocb->ki_pos > isize) {
		spin_unlock(&ip->i_flags_lock);
		if (!drained_dio) {
			if (*iolock == XFS_IOLOCK_SHARED) {
				xfs_iunlock(ip, *iolock);
				*iolock = XFS_IOLOCK_EXCL;
				xfs_ilock(ip, *iolock);
				iov_iter_reexpand(from, count);
			}
			/*
			 * We now have an IO submission barrier in place, but
			 * AIO can do EOF updates during IO completion and hence
			 * we now need to wait for all of them to drain. Non-AIO
			 * DIO will have drained before we are given the
			 * XFS_IOLOCK_EXCL, and so for most cases this wait is a
			 * no-op.
			 */
			inode_dio_wait(inode);
			drained_dio = true;
			goto restart;
		}

		trace_xfs_zero_eof(ip, isize, iocb->ki_pos - isize);
		error = iomap_zero_range(inode, isize, iocb->ki_pos - isize,
				NULL, &xfs_iomap_ops);
		if (error)
			return error;
	} else
		spin_unlock(&ip->i_flags_lock);

	/*
	 * Updating the timestamps will grab the ilock again from
	 * xfs_fs_dirty_inode, so we have to call it after dropping the
	 * lock above.  Eventually we should look into a way to avoid
	 * the pointless lock roundtrip.
	 */
	return file_modified(file);
}

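/*
 * Completion handler for direct writes, passed to iomap_dio_rw():
 * account the bytes written, finish any copy-on-write remapping or
 * unwritten extent conversion, and move the on-disk EOF out if this
 * write extended the file.
 */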
static int
xfs_dio_write_end_io(
	struct kiocb		*iocb,
	ssize_t			size,
	unsigned		flags)
{
	struct inode		*inode = file_inode(iocb->ki_filp);
	struct xfs_inode	*ip = XFS_I(inode);
	loff_t			offset = iocb->ki_pos;
	int			error = 0;

	trace_xfs_end_io_direct_write(ip, offset, size);

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return -EIO;

	if (size <= 0)
		return size;

	/*
	 * Capture amount written on completion as we can't reliably account
	 * for it on submission.
	 */
	XFS_STATS_ADD(ip->i_mount, xs_write_bytes, size);

	if (flags & IOMAP_DIO_COW) {
		error = xfs_reflink_end_cow(ip, offset, size);
		if (error)
			return error;
	}

	/*
	 * Unwritten conversion updates the in-core isize after extent
	 * conversion but before updating the on-disk size. Updating isize any
	 * earlier allows a racing dio read to find unwritten extents before
	 * they are converted.
	 */
	if (flags & IOMAP_DIO_UNWRITTEN)
		return xfs_iomap_write_unwritten(ip, offset, size, true);

	/*
	 * We need to update the in-core inode size here so that we don't end up
	 * with the on-disk inode size being outside the in-core inode size. We
	 * have no other method of updating EOF for AIO, so always do it here
	 * if necessary.
	 *
	 * We need to lock the test/set EOF update as we can be racing with
	 * other IO completions here to update the EOF. Failing to serialise
	 * here can result in EOF moving backwards and Bad Things Happen when
	 * that occurs.
	 */
	spin_lock(&ip->i_flags_lock);
	if (offset + size > i_size_read(inode)) {
		i_size_write(inode, offset + size);
		spin_unlock(&ip->i_flags_lock);
		error = xfs_setfilesize(ip, offset, size);
	} else {
		spin_unlock(&ip->i_flags_lock);
	}

	return error;
}

/*
 * xfs_file_dio_aio_write - handle direct IO writes
 *
 * Lock the inode appropriately to prepare for and issue a direct IO write.
 * By separating it from the buffered write path we remove all the
 * tricky-to-follow locking changes and looping.
 *
 * If there are cached pages or we're extending the file, we need IOLOCK_EXCL
 * until we're sure the bytes at the new EOF have been zeroed and/or the cached
 * pages are flushed out.
 *
 * In most cases the direct IO writes will be done holding IOLOCK_SHARED
 * allowing them to be done in parallel with reads and other direct IO writes.
 * However, if the IO is not aligned to filesystem blocks, the direct IO layer
 * needs to do sub-block zeroing and that requires serialisation against other
 * direct IOs to the same block. In this case we need to serialise the
 * submission of the unaligned IOs so that we don't get racing block zeroing in
 * the dio layer. To avoid the problem with aio, we also need to wait for
 * outstanding IOs to complete so that unwritten extent conversion is completed
 * before we try to map the overlapping block. This is currently implemented by
 * hitting it with a big hammer (i.e. inode_dio_wait()).
 *
 * Returns with locks held indicated by @iolock and errors indicated by
 * negative return values.
 */
STATIC ssize_t
xfs_file_dio_aio_write(
	struct kiocb		*iocb,
	struct iov_iter		*from)
{
	struct file		*file = iocb->ki_filp;
	struct address_space	*mapping = file->f_mapping;
	struct inode		*inode = mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	ssize_t			ret = 0;
	int			unaligned_io = 0;
	int			iolock;
	size_t			count = iov_iter_count(from);
	struct xfs_buftarg	*target = XFS_IS_REALTIME_INODE(ip) ?
					mp->m_rtdev_targp : mp->m_ddev_targp;

	/* DIO must be aligned to device logical sector size */
	if ((iocb->ki_pos | count) & target->bt_logical_sectormask)
		return -EINVAL;

	/*
	 * Don't take the exclusive iolock here unless the I/O is unaligned to
	 * the file system block size.  We don't need to consider the EOF
	 * extension case here because xfs_file_aio_write_checks() will relock
	 * the inode as necessary for EOF zeroing cases and fill out the new
	 * inode size as appropriate.
	 */
	if ((iocb->ki_pos & mp->m_blockmask) ||
	    ((iocb->ki_pos + count) & mp->m_blockmask)) {
		unaligned_io = 1;

		/*
		 * We can't properly handle unaligned direct I/O to reflink
		 * files yet, as we can't unshare a partial block.
		 */
		if (xfs_is_cow_inode(ip)) {
			trace_xfs_reflink_bounce_dio_write(ip, iocb->ki_pos, count);
			return -EREMCHG;
		}
		iolock = XFS_IOLOCK_EXCL;
	} else {
		iolock = XFS_IOLOCK_SHARED;
	}

	if (iocb->ki_flags & IOCB_NOWAIT) {
		/* unaligned dio always waits, bail */
		if (unaligned_io)
			return -EAGAIN;
		if (!xfs_ilock_nowait(ip, iolock))
			return -EAGAIN;
	} else {
		xfs_ilock(ip, iolock);
	}

	ret = xfs_file_aio_write_checks(iocb, from, &iolock);
	if (ret)
		goto out;
	count = iov_iter_count(from);

	/*
	 * If we are doing unaligned IO, we can't allow any other overlapping IO
	 * in-flight at the same time or we risk data corruption. Wait for all
	 * other IO to drain before we submit. If the IO is aligned, demote the
	 * iolock if we had to take the exclusive lock in
	 * xfs_file_aio_write_checks() for other reasons.
	 */
	if (unaligned_io) {
		inode_dio_wait(inode);
	} else if (iolock == XFS_IOLOCK_EXCL) {
		xfs_ilock_demote(ip, XFS_IOLOCK_EXCL);
		iolock = XFS_IOLOCK_SHARED;
	}

	trace_xfs_file_direct_write(ip, count, iocb->ki_pos);
	ret = iomap_dio_rw(iocb, from, &xfs_iomap_ops, xfs_dio_write_end_io);

	/*
	 * If unaligned, this is the only IO in-flight. If it has not yet
	 * completed, wait on it before we release the iolock to prevent
	 * subsequent overlapping IO.
	 */
	if (ret == -EIOCBQUEUED && unaligned_io)
		inode_dio_wait(inode);
out:
	xfs_iunlock(ip, iolock);

	/*
	 * No fallback to buffered IO on errors for XFS, direct IO will either
	 * complete fully or fail.
	 */
	ASSERT(ret < 0 || ret == count);
	return ret;
}

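/*
 * DAX write: dax_iomap_rw() completes synchronously, so the in-core
 * and on-disk file sizes can be updated here, directly after the copy.
 * The IOLOCK is always taken exclusive for DAX writes.
 */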
static noinline ssize_t
xfs_file_dax_write(
	struct kiocb		*iocb,
	struct iov_iter		*from)
{
	struct inode		*inode = iocb->ki_filp->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	int			iolock = XFS_IOLOCK_EXCL;
	ssize_t			ret, error = 0;
	size_t			count;
	loff_t			pos;

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (!xfs_ilock_nowait(ip, iolock))
			return -EAGAIN;
	} else {
		xfs_ilock(ip, iolock);
	}

	ret = xfs_file_aio_write_checks(iocb, from, &iolock);
	if (ret)
		goto out;

	pos = iocb->ki_pos;
	count = iov_iter_count(from);

	trace_xfs_file_dax_write(ip, count, pos);
	ret = dax_iomap_rw(iocb, from, &xfs_iomap_ops);
	if (ret > 0 && iocb->ki_pos > i_size_read(inode)) {
		i_size_write(inode, iocb->ki_pos);
		error = xfs_setfilesize(ip, pos, ret);
	}
out:
	xfs_iunlock(ip, iolock);
	if (error)
		return error;

	if (ret > 0) {
		XFS_STATS_ADD(ip->i_mount, xs_write_bytes, ret);

		/* Handle various SYNC-type writes */
		ret = generic_write_sync(iocb, ret);
	}
	return ret;
}

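/*
 * Buffered write through the page cache.  Space errors trigger a
 * retry: free speculative preallocations (EOF blocks, CoW blocks) and,
 * for ENOSPC, flush dirty inodes to release over-reserved metadata
 * space before failing the write.
 */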
STATIC ssize_t
xfs_file_buffered_aio_write(
	struct kiocb		*iocb,
	struct iov_iter		*from)
{
	struct file		*file = iocb->ki_filp;
	struct address_space	*mapping = file->f_mapping;
	struct inode		*inode = mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	ssize_t			ret;
	int			enospc = 0;
	int			iolock;

	if (iocb->ki_flags & IOCB_NOWAIT)
		return -EOPNOTSUPP;

write_retry:
	iolock = XFS_IOLOCK_EXCL;
	xfs_ilock(ip, iolock);

	ret = xfs_file_aio_write_checks(iocb, from, &iolock);
	if (ret)
		goto out;

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = inode_to_bdi(inode);

	trace_xfs_file_buffered_write(ip, iov_iter_count(from), iocb->ki_pos);
	ret = iomap_file_buffered_write(iocb, from, &xfs_iomap_ops);
	if (likely(ret >= 0))
		iocb->ki_pos += ret;

	/*
	 * If we hit a space limit, try to free up some lingering preallocated
	 * space before returning an error. In the case of ENOSPC, first try to
	 * write back all dirty inodes to free up some of the excess reserved
	 * metadata space. This reduces the chances that the eofblocks scan
	 * waits on dirty mappings. Since xfs_flush_inodes() is serialized, this
	 * also behaves as a filter to prevent too many eofblocks scans from
	 * running at the same time.
	 */
	if (ret == -EDQUOT && !enospc) {
		xfs_iunlock(ip, iolock);
		enospc = xfs_inode_free_quota_eofblocks(ip);
		if (enospc)
			goto write_retry;
		enospc = xfs_inode_free_quota_cowblocks(ip);
		if (enospc)
			goto write_retry;
		iolock = 0;
	} else if (ret == -ENOSPC && !enospc) {
		struct xfs_eofblocks eofb = {0};

		enospc = 1;
		xfs_flush_inodes(ip->i_mount);

		xfs_iunlock(ip, iolock);
		eofb.eof_flags = XFS_EOF_FLAGS_SYNC;
		xfs_icache_free_eofblocks(ip->i_mount, &eofb);
		xfs_icache_free_cowblocks(ip->i_mount, &eofb);
		goto write_retry;
	}

	current->backing_dev_info = NULL;
out:
	if (iolock)
		xfs_iunlock(ip, iolock);

	if (ret > 0) {
		XFS_STATS_ADD(ip->i_mount, xs_write_bytes, ret);
		/* Handle various SYNC-type writes */
		ret = generic_write_sync(iocb, ret);
	}
	return ret;
}

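/*
 * ->write_iter: dispatch to the DAX, direct or buffered write path.
 * A failed direct write may fall back to the buffered path, but only
 * for the reflink copy-on-write case signalled by -EREMCHG.
 */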
STATIC ssize_t
xfs_file_write_iter(
	struct kiocb		*iocb,
	struct iov_iter		*from)
{
	struct file		*file = iocb->ki_filp;
	struct address_space	*mapping = file->f_mapping;
	struct inode		*inode = mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	ssize_t			ret;
	size_t			ocount = iov_iter_count(from);

	XFS_STATS_INC(ip->i_mount, xs_write_calls);

	if (ocount == 0)
		return 0;

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return -EIO;

	if (IS_DAX(inode))
		return xfs_file_dax_write(iocb, from);

	if (iocb->ki_flags & IOCB_DIRECT) {
		/*
		 * Allow a directio write to fall back to a buffered
		 * write *only* in the case that we're doing a reflink
		 * CoW.  In all other directio scenarios we do not
		 * allow an operation to fall back to buffered mode.
		 */
		ret = xfs_file_dio_aio_write(iocb, from);
		if (ret != -EREMCHG)
			return ret;
	}

	return xfs_file_buffered_aio_write(iocb, from);
}

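/*
 * DAX layout-breaking helpers: xfs_break_dax_layouts() waits for any
 * page still under direct access (refcount > 1) to be released,
 * dropping and retaking the MMAPLOCK around each sleep via
 * xfs_wait_dax_page().
 */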
static void
xfs_wait_dax_page(
	struct inode		*inode)
{
	struct xfs_inode	*ip = XFS_I(inode);

	xfs_iunlock(ip, XFS_MMAPLOCK_EXCL);
	schedule();
	xfs_ilock(ip, XFS_MMAPLOCK_EXCL);
}

static int
xfs_break_dax_layouts(
	struct inode		*inode,
	bool			*retry)
{
	struct page		*page;

	ASSERT(xfs_isilocked(XFS_I(inode), XFS_MMAPLOCK_EXCL));

	page = dax_layout_busy_page(inode->i_mapping);
	if (!page)
		return 0;

	*retry = true;
	return ___wait_var_event(&page->_refcount,
			atomic_read(&page->_refcount) == 1, TASK_INTERRUPTIBLE,
			0, 0, xfs_wait_dax_page(inode));
}

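/*
 * Break any conflicting layouts before an operation covered by @reason:
 * busy DAX pages for BREAK_UNMAP, then leased layouts for both reasons,
 * retrying until nothing is left to break.
 */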
int
xfs_break_layouts(
	struct inode		*inode,
	uint			*iolock,
	enum layout_break_reason reason)
{
	bool			retry;
	int			error;

	ASSERT(xfs_isilocked(XFS_I(inode), XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL));

	do {
		retry = false;
		switch (reason) {
		case BREAK_UNMAP:
			error = xfs_break_dax_layouts(inode, &retry);
			if (error || retry)
				break;
			/* fall through */
		case BREAK_WRITE:
			error = xfs_break_leased_layouts(inode, iolock, &retry);
			break;
		default:
			WARN_ON_ONCE(1);
			error = -EINVAL;
		}
	} while (error == 0 && retry);

	return error;
}

#define	XFS_FALLOC_FL_SUPPORTED						\
		(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |		\
		 FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE |	\
		 FALLOC_FL_INSERT_RANGE | FALLOC_FL_UNSHARE_RANGE)

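/*
 * ->fallocate: preallocate, punch, zero, collapse, insert or unshare
 * a byte range, serialised against I/O and page faults by holding the
 * IOLOCK and MMAPLOCK exclusive.
 */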
STATIC long
xfs_file_fallocate(
	struct file		*file,
	int			mode,
	loff_t			offset,
	loff_t			len)
{
	struct inode		*inode = file_inode(file);
	struct xfs_inode	*ip = XFS_I(inode);
	long			error;
	enum xfs_prealloc_flags	flags = 0;
	uint			iolock = XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL;
	loff_t			new_size = 0;
	bool			do_file_insert = false;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;
	if (mode & ~XFS_FALLOC_FL_SUPPORTED)
		return -EOPNOTSUPP;

	xfs_ilock(ip, iolock);
	error = xfs_break_layouts(inode, &iolock, BREAK_UNMAP);
	if (error)
		goto out_unlock;

	if (mode & FALLOC_FL_PUNCH_HOLE) {
		error = xfs_free_file_space(ip, offset, len);
		if (error)
			goto out_unlock;
	} else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
		unsigned int	blksize_mask = i_blocksize(inode) - 1;

		if (offset & blksize_mask || len & blksize_mask) {
			error = -EINVAL;
			goto out_unlock;
		}

		/*
		 * There is no need to overlap collapse range with EOF,
		 * in which case it is effectively a truncate operation
		 */
		if (offset + len >= i_size_read(inode)) {
			error = -EINVAL;
			goto out_unlock;
		}

		new_size = i_size_read(inode) - len;

		error = xfs_collapse_file_space(ip, offset, len);
		if (error)
			goto out_unlock;
	} else if (mode & FALLOC_FL_INSERT_RANGE) {
		unsigned int	blksize_mask = i_blocksize(inode) - 1;
		loff_t		isize = i_size_read(inode);

		if (offset & blksize_mask || len & blksize_mask) {
			error = -EINVAL;
			goto out_unlock;
		}

		/*
		 * New inode size must not exceed ->s_maxbytes, accounting for
		 * possible signed overflow.
		 */
		if (inode->i_sb->s_maxbytes - isize < len) {
			error = -EFBIG;
			goto out_unlock;
		}
		new_size = isize + len;

		/* Offset should be less than i_size */
		if (offset >= isize) {
			error = -EINVAL;
			goto out_unlock;
		}
		do_file_insert = true;
	} else {
		flags |= XFS_PREALLOC_SET;

		if (!(mode & FALLOC_FL_KEEP_SIZE) &&
		    offset + len > i_size_read(inode)) {
			new_size = offset + len;
			error = inode_newsize_ok(inode, new_size);
			if (error)
				goto out_unlock;
		}

		if (mode & FALLOC_FL_ZERO_RANGE) {
			error = xfs_zero_file_space(ip, offset, len);
		} else if (mode & FALLOC_FL_UNSHARE_RANGE) {
			error = xfs_reflink_unshare(ip, offset, len);
			if (error)
				goto out_unlock;

			if (!xfs_is_always_cow_inode(ip)) {
				error = xfs_alloc_file_space(ip, offset, len,
						XFS_BMAPI_PREALLOC);
			}
		} else {
			/*
			 * If always_cow mode we can't use preallocations and
			 * thus should not create them.
			 */
			if (xfs_is_always_cow_inode(ip)) {
				error = -EOPNOTSUPP;
				goto out_unlock;
			}

			error = xfs_alloc_file_space(ip, offset, len,
						     XFS_BMAPI_PREALLOC);
		}
		if (error)
			goto out_unlock;
	}

	if (file->f_flags & O_DSYNC)
		flags |= XFS_PREALLOC_SYNC;

	error = xfs_update_prealloc_flags(ip, flags);
	if (error)
		goto out_unlock;

	/* Change file size if needed */
	if (new_size) {
		struct iattr iattr;

		iattr.ia_valid = ATTR_SIZE;
		iattr.ia_size = new_size;
		error = xfs_vn_setattr_size(file_dentry(file), &iattr);
		if (error)
			goto out_unlock;
	}

	/*
	 * Perform hole insertion now that the file size has been
	 * updated so that if we crash during the operation we don't
	 * leave shifted extents past EOF and hence lose access to
	 * the data that is contained within them.
	 */
	if (do_file_insert)
		error = xfs_insert_file_space(ip, offset, len);

out_unlock:
	xfs_iunlock(ip, iolock);
	return error;
}

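/*
 * ->remap_file_range: reflink or dedupe blocks from one file into
 * another, copying the CoW extent size hint to the destination when
 * the entire source file is being shared.
 */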
STATIC loff_t
xfs_file_remap_range(
	struct file		*file_in,
	loff_t			pos_in,
	struct file		*file_out,
	loff_t			pos_out,
	loff_t			len,
	unsigned int		remap_flags)
{
	struct inode		*inode_in = file_inode(file_in);
	struct xfs_inode	*src = XFS_I(inode_in);
	struct inode		*inode_out = file_inode(file_out);
	struct xfs_inode	*dest = XFS_I(inode_out);
	struct xfs_mount	*mp = src->i_mount;
	loff_t			remapped = 0;
	xfs_extlen_t		cowextsize;
	int			ret;

	if (remap_flags & ~(REMAP_FILE_DEDUP | REMAP_FILE_ADVISORY))
		return -EINVAL;

	if (!xfs_sb_version_hasreflink(&mp->m_sb))
		return -EOPNOTSUPP;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	/* Prepare and then clone file data. */
	ret = xfs_reflink_remap_prep(file_in, pos_in, file_out, pos_out,
			&len, remap_flags);
	if (ret < 0 || len == 0)
		return ret;

	trace_xfs_reflink_remap_range(src, pos_in, len, dest, pos_out);

	ret = xfs_reflink_remap_blocks(src, pos_in, dest, pos_out, len,
			&remapped);
	if (ret)
		goto out_unlock;

	/*
	 * Carry the cowextsize hint from src to dest if we're sharing the
	 * entire source file to the entire destination file, the source file
	 * has a cowextsize hint, and the destination file does not.
	 */
	cowextsize = 0;
	if (pos_in == 0 && len == i_size_read(inode_in) &&
	    (src->i_d.di_flags2 & XFS_DIFLAG2_COWEXTSIZE) &&
	    pos_out == 0 && len >= i_size_read(inode_out) &&
	    !(dest->i_d.di_flags2 & XFS_DIFLAG2_COWEXTSIZE))
		cowextsize = src->i_d.di_cowextsize;

	ret = xfs_reflink_update_dest(dest, pos_out + len, cowextsize,
			remap_flags);

out_unlock:
	xfs_reflink_remap_unlock(file_in, file_out);
	if (ret)
		trace_xfs_reflink_remap_range_error(dest, ret, _RET_IP_);
	return remapped > 0 ? remapped : ret;
}

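/*
 * ->open: enforce O_LARGEFILE for large files, refuse opens on a
 * shut-down filesystem, and advertise FMODE_NOWAIT support.
 */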
STATIC int
xfs_file_open(
	struct inode	*inode,
	struct file	*file)
{
	if (!(file->f_flags & O_LARGEFILE) && i_size_read(inode) > MAX_NON_LFS)
		return -EFBIG;
	if (XFS_FORCED_SHUTDOWN(XFS_M(inode->i_sb)))
		return -EIO;
	file->f_mode |= FMODE_NOWAIT;
	return 0;
}

STATIC int
xfs_dir_open(
	struct inode	*inode,
	struct file	*file)
{
	struct xfs_inode *ip = XFS_I(inode);
	int		mode;
	int		error;

	error = xfs_file_open(inode, file);
	if (error)
		return error;

	/*
	 * If there are any blocks, read-ahead block 0 as we're almost
	 * certain to have the next operation be a read there.
	 */
	mode = xfs_ilock_data_map_shared(ip);
	if (ip->i_d.di_nextents > 0)
		error = xfs_dir3_data_readahead(ip, 0, -1);
	xfs_iunlock(ip, mode);
	return error;
}

STATIC int
xfs_file_release(
	struct inode	*inode,
	struct file	*filp)
{
	return xfs_release(XFS_I(inode));
}

STATIC int
xfs_file_readdir(
	struct file	*file,
	struct dir_context *ctx)
{
	struct inode	*inode = file_inode(file);
	xfs_inode_t	*ip = XFS_I(inode);
	size_t		bufsize;

	/*
	 * The Linux API doesn't pass the total size of the buffer
	 * we read into down to the filesystem.  With the filldir concept
	 * it's not needed for correct information, but the XFS dir2 leaf
	 * code wants an estimate of the buffer size to calculate its
	 * readahead window and size the buffers used for mapping to
	 * physical blocks.
	 *
	 * Try to give it an estimate that's good enough, maybe at some
	 * point we can change the ->readdir prototype to include the
	 * buffer size.  For now we use the current glibc buffer size.
	 */
	bufsize = (size_t)min_t(loff_t, XFS_READDIR_BUFSIZE, ip->i_d.di_size);

	return xfs_readdir(NULL, ip, ctx, bufsize);
}

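/*
 * ->llseek: SEEK_HOLE and SEEK_DATA are resolved from the extent map
 * via the iomap seek helpers; all other whence values go through
 * generic_file_llseek().
 */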
STATIC loff_t
xfs_file_llseek(
	struct file	*file,
	loff_t		offset,
	int		whence)
{
	struct inode		*inode = file->f_mapping->host;

	if (XFS_FORCED_SHUTDOWN(XFS_I(inode)->i_mount))
		return -EIO;

	switch (whence) {
	default:
		return generic_file_llseek(file, offset, whence);
	case SEEK_HOLE:
		offset = iomap_seek_hole(inode, offset, &xfs_seek_iomap_ops);
		break;
	case SEEK_DATA:
		offset = iomap_seek_data(inode, offset, &xfs_seek_iomap_ops);
		break;
	}

	if (offset < 0)
		return offset;
	return vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
}

/*
 * Locking for serialisation of IO during page faults. This results in a lock
 * ordering of:
 *
 * mmap_sem (MM)
 *   sb_start_pagefault(vfs, freeze)
 *     i_mmaplock (XFS - truncate serialisation)
 *       page_lock (MM)
 *         i_lock (XFS - extent map serialisation)
 */
static vm_fault_t
__xfs_filemap_fault(
	struct vm_fault		*vmf,
	enum page_entry_size	pe_size,
	bool			write_fault)
{
	struct inode		*inode = file_inode(vmf->vma->vm_file);
	struct xfs_inode	*ip = XFS_I(inode);
	vm_fault_t		ret;

	trace_xfs_filemap_fault(ip, pe_size, write_fault);

	if (write_fault) {
		sb_start_pagefault(inode->i_sb);
		file_update_time(vmf->vma->vm_file);
	}

	xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
	if (IS_DAX(inode)) {
		pfn_t pfn;

		ret = dax_iomap_fault(vmf, pe_size, &pfn, NULL, &xfs_iomap_ops);
		if (ret & VM_FAULT_NEEDDSYNC)
			ret = dax_finish_sync_fault(vmf, pe_size, pfn);
	} else {
		if (write_fault)
			ret = iomap_page_mkwrite(vmf, &xfs_iomap_ops);
		else
			ret = filemap_fault(vmf);
	}
	xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);

	if (write_fault)
		sb_end_pagefault(inode->i_sb);
	return ret;
}

static vm_fault_t
xfs_filemap_fault(
	struct vm_fault		*vmf)
{
	/* DAX can shortcut the normal fault path on write faults! */
	return __xfs_filemap_fault(vmf, PE_SIZE_PTE,
			IS_DAX(file_inode(vmf->vma->vm_file)) &&
			(vmf->flags & FAULT_FLAG_WRITE));
}

static vm_fault_t
xfs_filemap_huge_fault(
	struct vm_fault		*vmf,
	enum page_entry_size	pe_size)
{
	if (!IS_DAX(file_inode(vmf->vma->vm_file)))
		return VM_FAULT_FALLBACK;

	/* DAX can shortcut the normal fault path on write faults! */
	return __xfs_filemap_fault(vmf, pe_size,
			(vmf->flags & FAULT_FLAG_WRITE));
}

static vm_fault_t
xfs_filemap_page_mkwrite(
	struct vm_fault		*vmf)
{
	return __xfs_filemap_fault(vmf, PE_SIZE_PTE, true);
}

/*
 * pfn_mkwrite was originally intended to ensure we capture time stamp updates
 * on write faults. In reality, it needs to serialise against truncate and
 * prepare memory for writing, so handle it as a standard write fault.
 */
static vm_fault_t
xfs_filemap_pfn_mkwrite(
	struct vm_fault		*vmf)
{
	return __xfs_filemap_fault(vmf, PE_SIZE_PTE, true);
}

static const struct vm_operations_struct xfs_file_vm_ops = {
	.fault		= xfs_filemap_fault,
	.huge_fault	= xfs_filemap_huge_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= xfs_filemap_page_mkwrite,
	.pfn_mkwrite	= xfs_filemap_pfn_mkwrite,
};

STATIC int
xfs_file_mmap(
	struct file	*filp,
	struct vm_area_struct *vma)
{
	/*
	 * We don't support synchronous mappings for non-DAX files. At least
	 * until someone comes up with a sensible use case.
	 */
	if (!IS_DAX(file_inode(filp)) && (vma->vm_flags & VM_SYNC))
		return -EOPNOTSUPP;

	file_accessed(filp);
	vma->vm_ops = &xfs_file_vm_ops;
	if (IS_DAX(file_inode(filp)))
		vma->vm_flags |= VM_HUGEPAGE;
	return 0;
}

const struct file_operations xfs_file_operations = {
	.llseek		= xfs_file_llseek,
	.read_iter	= xfs_file_read_iter,
	.write_iter	= xfs_file_write_iter,
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
	.iopoll		= iomap_dio_iopoll,
	.unlocked_ioctl	= xfs_file_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= xfs_file_compat_ioctl,
#endif
	.mmap		= xfs_file_mmap,
	.mmap_supported_flags = MAP_SYNC,
	.open		= xfs_file_open,
	.release	= xfs_file_release,
	.fsync		= xfs_file_fsync,
	.get_unmapped_area = thp_get_unmapped_area,
	.fallocate	= xfs_file_fallocate,
	.remap_file_range = xfs_file_remap_range,
};

const struct file_operations xfs_dir_file_operations = {
	.open		= xfs_dir_open,
	.read		= generic_read_dir,
	.iterate_shared	= xfs_file_readdir,
	.llseek		= generic_file_llseek,
	.unlocked_ioctl	= xfs_file_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= xfs_file_compat_ioctl,
#endif
	.fsync		= xfs_dir_fsync,
};