/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_inode_item.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_error.h"
#include "xfs_dir2.h"
#include "xfs_dir2_priv.h"
#include "xfs_ioctl.h"
#include "xfs_trace.h"
#include "xfs_log.h"
#include "xfs_icache.h"
#include "xfs_pnfs.h"
#include "xfs_iomap.h"
#include "xfs_reflink.h"

#include <linux/dcache.h>
#include <linux/falloc.h>
#include <linux/pagevec.h>
#include <linux/backing-dev.h>

static const struct vm_operations_struct xfs_file_vm_ops;

/*
 * Clear the specified ranges to zero through either the pagecache or DAX.
 * Holes and unwritten extents will be left as-is as they already are zeroed.
 */
int
xfs_zero_range(
	struct xfs_inode	*ip,
	xfs_off_t		pos,
	xfs_off_t		count,
	bool			*did_zero)
{
	return iomap_zero_range(VFS_I(ip), pos, count, did_zero, &xfs_iomap_ops);
}

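/*
 * Set or clear the XFS_DIFLAG_PREALLOC flag on the inode in a transaction.
 * Unless the change is marked invisible, this also strips the setuid/setgid
 * bits and bumps the change/modification times, like a regular write would.
 */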
int
xfs_update_prealloc_flags(
	struct xfs_inode	*ip,
	enum xfs_prealloc_flags	flags)
{
	struct xfs_trans	*tp;
	int			error;

	error = xfs_trans_alloc(ip->i_mount, &M_RES(ip->i_mount)->tr_writeid,
			0, 0, 0, &tp);
	if (error)
		return error;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);

	if (!(flags & XFS_PREALLOC_INVISIBLE)) {
		VFS_I(ip)->i_mode &= ~S_ISUID;
		if (VFS_I(ip)->i_mode & S_IXGRP)
			VFS_I(ip)->i_mode &= ~S_ISGID;
		xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
	}

	if (flags & XFS_PREALLOC_SET)
		ip->i_d.di_flags |= XFS_DIFLAG_PREALLOC;
	if (flags & XFS_PREALLOC_CLEAR)
		ip->i_d.di_flags &= ~XFS_DIFLAG_PREALLOC;

	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
	if (flags & XFS_PREALLOC_SYNC)
		xfs_trans_set_sync(tp);
	return xfs_trans_commit(tp);
}

/*
 * Fsync operations on directories are much simpler than on regular files,
 * as there is no file data to flush, and thus also no need for explicit
 * cache flush operations, and there are no non-transaction metadata updates
 * on directories either.
 */
STATIC int
xfs_dir_fsync(
	struct file		*file,
	loff_t			start,
	loff_t			end,
	int			datasync)
{
	struct xfs_inode	*ip = XFS_I(file->f_mapping->host);
	struct xfs_mount	*mp = ip->i_mount;
	xfs_lsn_t		lsn = 0;

	trace_xfs_dir_fsync(ip);

	xfs_ilock(ip, XFS_ILOCK_SHARED);
	if (xfs_ipincount(ip))
		lsn = ip->i_itemp->ili_last_lsn;
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	if (!lsn)
		return 0;
	return _xfs_log_force_lsn(mp, lsn, XFS_LOG_SYNC, NULL);
}

STATIC int
xfs_file_fsync(
	struct file		*file,
	loff_t			start,
	loff_t			end,
	int			datasync)
{
	struct inode		*inode = file->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	int			error = 0;
	int			log_flushed = 0;
	xfs_lsn_t		lsn = 0;

	trace_xfs_file_fsync(ip);

	error = file_write_and_wait_range(file, start, end);
	if (error)
		return error;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	xfs_iflags_clear(ip, XFS_ITRUNCATED);

	/*
	 * If we have an RT and/or log subvolume we need to make sure to flush
	 * the write cache of the device used for file data first.  This is to
	 * ensure newly written file data makes it to disk before logging the
	 * new inode size in case of an extending write.
	 */
	if (XFS_IS_REALTIME_INODE(ip))
		xfs_blkdev_issue_flush(mp->m_rtdev_targp);
	else if (mp->m_logdev_targp != mp->m_ddev_targp)
		xfs_blkdev_issue_flush(mp->m_ddev_targp);

	/*
	 * All metadata updates are logged, which means that we just have to
	 * flush the log up to the latest LSN that touched the inode. If we have
	 * concurrent fsync/fdatasync() calls, we need them to all block on the
	 * log force before we clear the ili_fsync_fields field. This ensures
	 * that we don't get a racing sync operation that does not wait for the
	 * metadata to hit the journal before returning. If we race with
	 * clearing the ili_fsync_fields, then all that will happen is the log
	 * force will do nothing as the lsn will already be on disk. We can't
	 * race with setting ili_fsync_fields because that is done under
	 * XFS_ILOCK_EXCL, and that can't happen because we hold the lock shared
	 * until after the ili_fsync_fields is cleared.
	 */
	xfs_ilock(ip, XFS_ILOCK_SHARED);
	if (xfs_ipincount(ip)) {
		if (!datasync ||
		    (ip->i_itemp->ili_fsync_fields & ~XFS_ILOG_TIMESTAMP))
			lsn = ip->i_itemp->ili_last_lsn;
	}

	if (lsn) {
		error = _xfs_log_force_lsn(mp, lsn, XFS_LOG_SYNC, &log_flushed);
		ip->i_itemp->ili_fsync_fields = 0;
	}
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	/*
	 * If we only have a single device, and the log force above was
	 * a no-op we might have to flush the data device cache here.
	 * This can only happen for fdatasync/O_DSYNC if we were overwriting
	 * an already allocated file and thus do not have any metadata to
	 * commit.
	 */
	if (!log_flushed && !XFS_IS_REALTIME_INODE(ip) &&
	    mp->m_logdev_targp == mp->m_ddev_targp)
		xfs_blkdev_issue_flush(mp->m_ddev_targp);

	return error;
}

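/*
 * Direct I/O read path: take the IOLOCK shared to serialise against
 * truncate and concurrent writers, then hand the iter to the iomap dio
 * code.
 */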
STATIC ssize_t
xfs_file_dio_aio_read(
	struct kiocb		*iocb,
	struct iov_iter		*to)
{
	struct xfs_inode	*ip = XFS_I(file_inode(iocb->ki_filp));
	size_t			count = iov_iter_count(to);
	ssize_t			ret;

	trace_xfs_file_direct_read(ip, count, iocb->ki_pos);

	if (!count)
		return 0; /* skip atime */

	file_accessed(iocb->ki_filp);

	xfs_ilock(ip, XFS_IOLOCK_SHARED);
	ret = iomap_dio_rw(iocb, to, &xfs_iomap_ops, NULL);
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);

	return ret;
}

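/*
 * DAX read path: reads go synchronously through dax_iomap_rw() under the
 * shared IOLOCK; IOCB_NOWAIT callers only trylock.
 */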
static noinline ssize_t
xfs_file_dax_read(
	struct kiocb		*iocb,
	struct iov_iter		*to)
{
	struct xfs_inode	*ip = XFS_I(iocb->ki_filp->f_mapping->host);
	size_t			count = iov_iter_count(to);
	ssize_t			ret = 0;

	trace_xfs_file_dax_read(ip, count, iocb->ki_pos);

	if (!count)
		return 0; /* skip atime */

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (!xfs_ilock_nowait(ip, XFS_IOLOCK_SHARED))
			return -EAGAIN;
	} else {
		xfs_ilock(ip, XFS_IOLOCK_SHARED);
	}

	ret = dax_iomap_rw(iocb, to, &xfs_iomap_ops);
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);

	file_accessed(iocb->ki_filp);
	return ret;
}

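/*
 * Buffered read path: generic_file_read_iter() does the page cache work,
 * with the shared IOLOCK providing the usual XFS I/O serialisation.
 */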
STATIC ssize_t
xfs_file_buffered_aio_read(
	struct kiocb		*iocb,
	struct iov_iter		*to)
{
	struct xfs_inode	*ip = XFS_I(file_inode(iocb->ki_filp));
	ssize_t			ret;

	trace_xfs_file_buffered_read(ip, iov_iter_count(to), iocb->ki_pos);

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (!xfs_ilock_nowait(ip, XFS_IOLOCK_SHARED))
			return -EAGAIN;
	} else {
		xfs_ilock(ip, XFS_IOLOCK_SHARED);
	}
	ret = generic_file_read_iter(iocb, to);
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);

	return ret;
}

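/*
 * Dispatch a read to the DAX, direct or buffered variant based on the
 * inode and the iocb flags.
 */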
STATIC ssize_t
xfs_file_read_iter(
	struct kiocb		*iocb,
	struct iov_iter		*to)
{
	struct inode		*inode = file_inode(iocb->ki_filp);
	struct xfs_mount	*mp = XFS_I(inode)->i_mount;
	ssize_t			ret = 0;

	XFS_STATS_INC(mp, xs_read_calls);

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	if (IS_DAX(inode))
		ret = xfs_file_dax_read(iocb, to);
	else if (iocb->ki_flags & IOCB_DIRECT)
		ret = xfs_file_dio_aio_read(iocb, to);
	else
		ret = xfs_file_buffered_aio_read(iocb, to);

	if (ret > 0)
		XFS_STATS_ADD(mp, xs_read_bytes, ret);
	return ret;
}

/*
 * Zero any on disk space between the current EOF and the new, larger EOF.
 *
 * This handles the normal case of zeroing the remainder of the last block in
 * the file and the unusual case of zeroing blocks out beyond the size of the
 * file.  This second case only happens with fixed size extents and when the
 * system crashes before the inode size was updated but after blocks were
 * allocated.
 *
 * Expects the iolock to be held exclusive, and will take the ilock internally.
 */
int					/* error (positive) */
xfs_zero_eof(
	struct xfs_inode	*ip,
	xfs_off_t		offset,		/* starting I/O offset */
	xfs_fsize_t		isize,		/* current inode size */
	bool			*did_zeroing)
{
	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
	ASSERT(offset > isize);

	trace_xfs_zero_eof(ip, isize, offset - isize);
	return xfs_zero_range(ip, isize, offset - isize, did_zeroing);
}

/*
 * Common pre-write limit and setup checks.
 *
 * Called with the iolock held either shared or exclusive according to
 * @iolock, and returns with it held.  Might upgrade the iolock to exclusive
 * if called for a direct write beyond i_size.
 */
STATIC ssize_t
xfs_file_aio_write_checks(
	struct kiocb		*iocb,
	struct iov_iter		*from,
	int			*iolock)
{
	struct file		*file = iocb->ki_filp;
	struct inode		*inode = file->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	ssize_t			error = 0;
	size_t			count = iov_iter_count(from);
	bool			drained_dio = false;

restart:
	error = generic_write_checks(iocb, from);
	if (error <= 0)
		return error;

	error = xfs_break_layouts(inode, iolock);
	if (error)
		return error;

	/*
	 * For changing security info in file_remove_privs() we need i_rwsem
	 * exclusively.
	 */
	if (*iolock == XFS_IOLOCK_SHARED && !IS_NOSEC(inode)) {
		xfs_iunlock(ip, *iolock);
		*iolock = XFS_IOLOCK_EXCL;
		xfs_ilock(ip, *iolock);
		goto restart;
	}
	/*
	 * If the offset is beyond the size of the file, we need to zero any
	 * blocks that fall between the existing EOF and the start of this
	 * write.  If zeroing is needed and we are currently holding the
	 * iolock shared, we need to update it to exclusive, which implies
	 * having to redo all checks made before.
	 *
	 * We need to serialise against EOF updates that occur in IO
	 * completions here. We want to make sure that nobody is changing the
	 * size while we do this check until we have placed an IO barrier (i.e.
	 * hold the XFS_IOLOCK_EXCL) that prevents new IO from being dispatched.
	 * The spinlock effectively forms a memory barrier once we have the
	 * XFS_IOLOCK_EXCL so we are guaranteed to see the latest EOF value
	 * and hence be able to correctly determine if we need to run zeroing.
	 */
	spin_lock(&ip->i_flags_lock);
	if (iocb->ki_pos > i_size_read(inode)) {
		spin_unlock(&ip->i_flags_lock);
		if (!drained_dio) {
			if (*iolock == XFS_IOLOCK_SHARED) {
				xfs_iunlock(ip, *iolock);
				*iolock = XFS_IOLOCK_EXCL;
				xfs_ilock(ip, *iolock);
				iov_iter_reexpand(from, count);
			}
			/*
			 * We now have an IO submission barrier in place, but
			 * AIO can do EOF updates during IO completion and hence
			 * we now need to wait for all of them to drain. Non-AIO
			 * DIO will have drained before we are given the
			 * XFS_IOLOCK_EXCL, and so for most cases this wait is a
			 * no-op.
			 */
			inode_dio_wait(inode);
			drained_dio = true;
			goto restart;
		}
		error = xfs_zero_eof(ip, iocb->ki_pos, i_size_read(inode), NULL);
		if (error)
			return error;
	} else
		spin_unlock(&ip->i_flags_lock);

	/*
	 * Updating the timestamps will grab the ilock again from
	 * xfs_fs_dirty_inode, so we have to call it after dropping the
	 * lock above.  Eventually we should look into a way to avoid
	 * the pointless lock roundtrip.
	 */
	if (likely(!(file->f_mode & FMODE_NOCMTIME))) {
		error = file_update_time(file);
		if (error)
			return error;
	}

	/*
	 * If we're writing the file then make sure to clear the setuid and
	 * setgid bits if the process is not being run by root.  This keeps
	 * people from modifying setuid and setgid binaries.
	 */
	if (!IS_NOSEC(inode))
		return file_remove_privs(file);
	return 0;
}

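/*
 * Completion handler for direct writes, run from the iomap dio code: finish
 * any pending copy-on-write remap, convert unwritten extents, and push out
 * the new on-disk size if the write extended the file.
 */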
static int
xfs_dio_write_end_io(
	struct kiocb		*iocb,
	ssize_t			size,
	unsigned		flags)
{
	struct inode		*inode = file_inode(iocb->ki_filp);
	struct xfs_inode	*ip = XFS_I(inode);
	loff_t			offset = iocb->ki_pos;
	int			error = 0;

	trace_xfs_end_io_direct_write(ip, offset, size);

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return -EIO;

	if (size <= 0)
		return size;

	if (flags & IOMAP_DIO_COW) {
		error = xfs_reflink_end_cow(ip, offset, size);
		if (error)
			return error;
	}

	/*
	 * Unwritten conversion updates the in-core isize after extent
	 * conversion but before updating the on-disk size. Updating isize any
	 * earlier allows a racing dio read to find unwritten extents before
	 * they are converted.
	 */
	if (flags & IOMAP_DIO_UNWRITTEN)
		return xfs_iomap_write_unwritten(ip, offset, size, true);

	/*
	 * We need to update the in-core inode size here so that we don't end up
	 * with the on-disk inode size being outside the in-core inode size. We
	 * have no other method of updating EOF for AIO, so always do it here
	 * if necessary.
	 *
	 * We need to lock the test/set EOF update as we can be racing with
	 * other IO completions here to update the EOF. Failing to serialise
	 * here can result in EOF moving backwards and Bad Things Happen when
	 * that occurs.
	 */
	spin_lock(&ip->i_flags_lock);
	if (offset + size > i_size_read(inode)) {
		i_size_write(inode, offset + size);
		spin_unlock(&ip->i_flags_lock);
		error = xfs_setfilesize(ip, offset, size);
	} else {
		spin_unlock(&ip->i_flags_lock);
	}

	return error;
}

/*
 * xfs_file_dio_aio_write - handle direct IO writes
 *
 * Lock the inode appropriately to prepare for and issue a direct IO write.
 * By separating it from the buffered write path we remove all the tricky to
 * follow locking changes and looping.
 *
 * If there are cached pages or we're extending the file, we need IOLOCK_EXCL
 * until we're sure the bytes at the new EOF have been zeroed and/or the cached
 * pages are flushed out.
 *
 * In most cases the direct IO writes will be done holding IOLOCK_SHARED
 * allowing them to be done in parallel with reads and other direct IO writes.
 * However, if the IO is not aligned to filesystem blocks, the direct IO layer
 * needs to do sub-block zeroing and that requires serialisation against other
 * direct IOs to the same block. In this case we need to serialise the
 * submission of the unaligned IOs so that we don't get racing block zeroing in
 * the dio layer.  To avoid the problem with aio, we also need to wait for
 * outstanding IOs to complete so that unwritten extent conversion is completed
 * before we try to map the overlapping block. This is currently implemented by
 * hitting it with a big hammer (i.e. inode_dio_wait()).
 *
 * Returns with locks held indicated by @iolock and errors indicated by
 * negative return values.
 */
STATIC ssize_t
xfs_file_dio_aio_write(
	struct kiocb		*iocb,
	struct iov_iter		*from)
{
	struct file		*file = iocb->ki_filp;
	struct address_space	*mapping = file->f_mapping;
	struct inode		*inode = mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	ssize_t			ret = 0;
	int			unaligned_io = 0;
	int			iolock;
	size_t			count = iov_iter_count(from);
	struct xfs_buftarg	*target = XFS_IS_REALTIME_INODE(ip) ?
					mp->m_rtdev_targp : mp->m_ddev_targp;

	/* DIO must be aligned to device logical sector size */
	if ((iocb->ki_pos | count) & target->bt_logical_sectormask)
		return -EINVAL;

	/*
	 * Don't take the exclusive iolock here unless the I/O is unaligned to
	 * the file system block size.  We don't need to consider the EOF
	 * extension case here because xfs_file_aio_write_checks() will relock
	 * the inode as necessary for EOF zeroing cases and fill out the new
	 * inode size as appropriate.
	 */
	if ((iocb->ki_pos & mp->m_blockmask) ||
	    ((iocb->ki_pos + count) & mp->m_blockmask)) {
		unaligned_io = 1;

		/*
		 * We can't properly handle unaligned direct I/O to reflink
		 * files yet, as we can't unshare a partial block.
		 */
		if (xfs_is_reflink_inode(ip)) {
			trace_xfs_reflink_bounce_dio_write(ip, iocb->ki_pos, count);
			return -EREMCHG;
		}
		iolock = XFS_IOLOCK_EXCL;
	} else {
		iolock = XFS_IOLOCK_SHARED;
	}

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (!xfs_ilock_nowait(ip, iolock))
			return -EAGAIN;
	} else {
		xfs_ilock(ip, iolock);
	}

	ret = xfs_file_aio_write_checks(iocb, from, &iolock);
	if (ret)
		goto out;
	count = iov_iter_count(from);

	/*
	 * If we are doing unaligned IO, wait for all other IO to drain,
	 * otherwise demote the lock if we had to take the exclusive lock
	 * for other reasons in xfs_file_aio_write_checks.
	 */
	if (unaligned_io) {
		/* If we are going to wait for other DIO to finish, bail */
		if (iocb->ki_flags & IOCB_NOWAIT) {
			if (atomic_read(&inode->i_dio_count)) {
				ret = -EAGAIN;
				goto out;
			}
		} else {
			inode_dio_wait(inode);
		}
	} else if (iolock == XFS_IOLOCK_EXCL) {
		xfs_ilock_demote(ip, XFS_IOLOCK_EXCL);
		iolock = XFS_IOLOCK_SHARED;
	}

	trace_xfs_file_direct_write(ip, count, iocb->ki_pos);
	ret = iomap_dio_rw(iocb, from, &xfs_iomap_ops, xfs_dio_write_end_io);
out:
	xfs_iunlock(ip, iolock);

	/*
	 * No fallback to buffered IO on errors for XFS, direct IO will either
	 * complete fully or fail.
	 */
	ASSERT(ret < 0 || ret == count);
	return ret;
}

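/*
 * DAX write path: always runs under the exclusive IOLOCK, and updates the
 * file size itself since there is no separate I/O completion to do it.
 */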
static noinline ssize_t
xfs_file_dax_write(
	struct kiocb		*iocb,
	struct iov_iter		*from)
{
	struct inode		*inode = iocb->ki_filp->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	int			iolock = XFS_IOLOCK_EXCL;
	ssize_t			ret, error = 0;
	size_t			count;
	loff_t			pos;

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (!xfs_ilock_nowait(ip, iolock))
			return -EAGAIN;
	} else {
		xfs_ilock(ip, iolock);
	}

	ret = xfs_file_aio_write_checks(iocb, from, &iolock);
	if (ret)
		goto out;

	pos = iocb->ki_pos;
	count = iov_iter_count(from);

	trace_xfs_file_dax_write(ip, count, pos);
	ret = dax_iomap_rw(iocb, from, &xfs_iomap_ops);
	if (ret > 0 && iocb->ki_pos > i_size_read(inode)) {
		i_size_write(inode, iocb->ki_pos);
		error = xfs_setfilesize(ip, pos, ret);
	}
out:
	xfs_iunlock(ip, iolock);
	return error ? error : ret;
}

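/*
 * Buffered write path.  On ENOSPC or EDQUOT this retries after trying to
 * free quota, EOF block and CoW block preallocations; see the comment in
 * the retry logic below.
 */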
STATIC ssize_t
xfs_file_buffered_aio_write(
	struct kiocb		*iocb,
	struct iov_iter		*from)
{
	struct file		*file = iocb->ki_filp;
	struct address_space	*mapping = file->f_mapping;
	struct inode		*inode = mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	ssize_t			ret;
	int			enospc = 0;
	int			iolock;

	if (iocb->ki_flags & IOCB_NOWAIT)
		return -EOPNOTSUPP;

write_retry:
	iolock = XFS_IOLOCK_EXCL;
	xfs_ilock(ip, iolock);

	ret = xfs_file_aio_write_checks(iocb, from, &iolock);
	if (ret)
		goto out;

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = inode_to_bdi(inode);

	trace_xfs_file_buffered_write(ip, iov_iter_count(from), iocb->ki_pos);
	ret = iomap_file_buffered_write(iocb, from, &xfs_iomap_ops);
	if (likely(ret >= 0))
		iocb->ki_pos += ret;

	/*
	 * If we hit a space limit, try to free up some lingering preallocated
	 * space before returning an error. In the case of ENOSPC, first try to
	 * write back all dirty inodes to free up some of the excess reserved
	 * metadata space. This reduces the chances that the eofblocks scan
	 * waits on dirty mappings. Since xfs_flush_inodes() is serialized, this
	 * also behaves as a filter to prevent too many eofblocks scans from
	 * running at the same time.
	 */
	if (ret == -EDQUOT && !enospc) {
		xfs_iunlock(ip, iolock);
		enospc = xfs_inode_free_quota_eofblocks(ip);
		if (enospc)
			goto write_retry;
		enospc = xfs_inode_free_quota_cowblocks(ip);
		if (enospc)
			goto write_retry;
		iolock = 0;
	} else if (ret == -ENOSPC && !enospc) {
		struct xfs_eofblocks eofb = {0};

		enospc = 1;
		xfs_flush_inodes(ip->i_mount);

		xfs_iunlock(ip, iolock);
		eofb.eof_flags = XFS_EOF_FLAGS_SYNC;
		xfs_icache_free_eofblocks(ip->i_mount, &eofb);
		xfs_icache_free_cowblocks(ip->i_mount, &eofb);
		goto write_retry;
	}

	current->backing_dev_info = NULL;
out:
	if (iolock)
		xfs_iunlock(ip, iolock);
	return ret;
}

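/*
 * Dispatch a write to the DAX, direct or buffered variant.  Statistics and
 * O_(D)SYNC handling are common to all three paths.
 */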
STATIC ssize_t
xfs_file_write_iter(
	struct kiocb		*iocb,
	struct iov_iter		*from)
{
	struct file		*file = iocb->ki_filp;
	struct address_space	*mapping = file->f_mapping;
	struct inode		*inode = mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	ssize_t			ret;
	size_t			ocount = iov_iter_count(from);

	XFS_STATS_INC(ip->i_mount, xs_write_calls);

	if (ocount == 0)
		return 0;

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return -EIO;

	if (IS_DAX(inode))
		ret = xfs_file_dax_write(iocb, from);
	else if (iocb->ki_flags & IOCB_DIRECT) {
		/*
		 * Allow a directio write to fall back to a buffered
		 * write *only* in the case that we're doing a reflink
		 * CoW.  In all other directio scenarios we do not
		 * allow an operation to fall back to buffered mode.
		 */
		ret = xfs_file_dio_aio_write(iocb, from);
		if (ret == -EREMCHG)
			goto buffered;
	} else {
buffered:
		ret = xfs_file_buffered_aio_write(iocb, from);
	}

	if (ret > 0) {
		XFS_STATS_ADD(ip->i_mount, xs_write_bytes, ret);

		/* Handle various SYNC-type writes */
		ret = generic_write_sync(iocb, ret);
	}
	return ret;
}

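/*
 * fallocate modes supported by xfs_file_fallocate() below.  As an
 * illustrative userspace call (not part of this file), punching a hole
 * without changing the file size would be:
 *
 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, off, len);
 */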
#define	XFS_FALLOC_FL_SUPPORTED						\
		(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |		\
		 FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE |	\
		 FALLOC_FL_INSERT_RANGE | FALLOC_FL_UNSHARE_RANGE)

STATIC long
xfs_file_fallocate(
	struct file		*file,
	int			mode,
	loff_t			offset,
	loff_t			len)
{
	struct inode		*inode = file_inode(file);
	struct xfs_inode	*ip = XFS_I(inode);
	long			error;
	enum xfs_prealloc_flags	flags = 0;
	uint			iolock = XFS_IOLOCK_EXCL;
	loff_t			new_size = 0;
	bool			do_file_insert = false;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;
	if (mode & ~XFS_FALLOC_FL_SUPPORTED)
		return -EOPNOTSUPP;

	xfs_ilock(ip, iolock);
	error = xfs_break_layouts(inode, &iolock);
	if (error)
		goto out_unlock;

	xfs_ilock(ip, XFS_MMAPLOCK_EXCL);
	iolock |= XFS_MMAPLOCK_EXCL;

	if (mode & FALLOC_FL_PUNCH_HOLE) {
		error = xfs_free_file_space(ip, offset, len);
		if (error)
			goto out_unlock;
	} else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
		unsigned int blksize_mask = i_blocksize(inode) - 1;

		if (offset & blksize_mask || len & blksize_mask) {
			error = -EINVAL;
			goto out_unlock;
		}

		/*
		 * There is no need to overlap collapse range with EOF,
		 * in which case it is effectively a truncate operation
		 */
		if (offset + len >= i_size_read(inode)) {
			error = -EINVAL;
			goto out_unlock;
		}

		new_size = i_size_read(inode) - len;

		error = xfs_collapse_file_space(ip, offset, len);
		if (error)
			goto out_unlock;
	} else if (mode & FALLOC_FL_INSERT_RANGE) {
		unsigned int blksize_mask = i_blocksize(inode) - 1;

		new_size = i_size_read(inode) + len;
		if (offset & blksize_mask || len & blksize_mask) {
			error = -EINVAL;
			goto out_unlock;
		}

		/* check the new inode size does not wrap through zero */
		if (new_size > inode->i_sb->s_maxbytes) {
			error = -EFBIG;
			goto out_unlock;
		}

		/* Offset should be less than i_size */
		if (offset >= i_size_read(inode)) {
			error = -EINVAL;
			goto out_unlock;
		}
		do_file_insert = true;
	} else {
		flags |= XFS_PREALLOC_SET;

		if (!(mode & FALLOC_FL_KEEP_SIZE) &&
		    offset + len > i_size_read(inode)) {
			new_size = offset + len;
			error = inode_newsize_ok(inode, new_size);
			if (error)
				goto out_unlock;
		}

		if (mode & FALLOC_FL_ZERO_RANGE)
			error = xfs_zero_file_space(ip, offset, len);
		else {
			if (mode & FALLOC_FL_UNSHARE_RANGE) {
				error = xfs_reflink_unshare(ip, offset, len);
				if (error)
					goto out_unlock;
			}
			error = xfs_alloc_file_space(ip, offset, len,
						     XFS_BMAPI_PREALLOC);
		}
		if (error)
			goto out_unlock;
	}

	if (file->f_flags & O_DSYNC)
		flags |= XFS_PREALLOC_SYNC;

	error = xfs_update_prealloc_flags(ip, flags);
	if (error)
		goto out_unlock;

	/* Change file size if needed */
	if (new_size) {
		struct iattr iattr;

		iattr.ia_valid = ATTR_SIZE;
		iattr.ia_size = new_size;
		error = xfs_vn_setattr_size(file_dentry(file), &iattr);
		if (error)
			goto out_unlock;
	}

	/*
	 * Perform hole insertion now that the file size has been
	 * updated so that if we crash during the operation we don't
	 * leave shifted extents past EOF and hence losing access to
	 * the data that is contained within them.
	 */
	if (do_file_insert)
		error = xfs_insert_file_space(ip, offset, len);

out_unlock:
	xfs_iunlock(ip, iolock);
	return error;
}

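/*
 * The clone and dedupe file operations both map to xfs_reflink_remap_range();
 * dedupe passes is_dedupe = true so only byte-identical data is remapped.
 */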
STATIC int
xfs_file_clone_range(
	struct file	*file_in,
	loff_t		pos_in,
	struct file	*file_out,
	loff_t		pos_out,
	u64		len)
{
	return xfs_reflink_remap_range(file_in, pos_in, file_out, pos_out,
				     len, false);
}

STATIC ssize_t
xfs_file_dedupe_range(
	struct file	*src_file,
	u64		loff,
	u64		len,
	struct file	*dst_file,
	u64		dst_loff)
{
	int		error;

	error = xfs_reflink_remap_range(src_file, loff, dst_file, dst_loff,
				     len, true);
	if (error)
		return error;
	return len;
}

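/*
 * Note that opening a file also advertises FMODE_NOWAIT, so the read/write
 * paths above may be entered with IOCB_NOWAIT set.
 */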
STATIC int
xfs_file_open(
	struct inode	*inode,
	struct file	*file)
{
	if (!(file->f_flags & O_LARGEFILE) && i_size_read(inode) > MAX_NON_LFS)
		return -EFBIG;
	if (XFS_FORCED_SHUTDOWN(XFS_M(inode->i_sb)))
		return -EIO;
	file->f_mode |= FMODE_NOWAIT;
	return 0;
}

STATIC int
xfs_dir_open(
	struct inode	*inode,
	struct file	*file)
{
	struct xfs_inode *ip = XFS_I(inode);
	int		mode;
	int		error;

	error = xfs_file_open(inode, file);
	if (error)
		return error;

	/*
	 * If there are any blocks, read-ahead block 0 as we're almost
	 * certain to have the next operation be a read there.
	 */
	mode = xfs_ilock_data_map_shared(ip);
	if (ip->i_d.di_nextents > 0)
		error = xfs_dir3_data_readahead(ip, 0, -1);
	xfs_iunlock(ip, mode);
	return error;
}

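/* ->release of the struct file: xfs_release() may trim post-EOF preallocation. */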
STATIC int
xfs_file_release(
	struct inode	*inode,
	struct file	*filp)
{
	return xfs_release(XFS_I(inode));
}

STATIC int
xfs_file_readdir(
	struct file	*file,
	struct dir_context *ctx)
{
	struct inode	*inode = file_inode(file);
	xfs_inode_t	*ip = XFS_I(inode);
	size_t		bufsize;

	/*
	 * The Linux API doesn't pass the total size of the buffer we read
	 * into down to the filesystem.  With the filldir concept it's not
	 * needed for correct information, but the XFS dir2 leaf code wants
	 * an estimate of the buffer size to calculate its readahead window
	 * and size the buffers used for mapping to physical blocks.
	 *
	 * Try to give it an estimate that's good enough, maybe at some
	 * point we can change the ->readdir prototype to include the
	 * buffer size.  For now we use the current glibc buffer size.
	 */
	bufsize = (size_t)min_t(loff_t, XFS_READDIR_BUFSIZE, ip->i_d.di_size);

	return xfs_readdir(NULL, ip, ctx, bufsize);
}

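/*
 * SEEK_HOLE/SEEK_DATA are implemented on top of the iomap extent walkers;
 * everything else is handled by generic_file_llseek().
 */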
STATIC loff_t
xfs_file_llseek(
	struct file	*file,
	loff_t		offset,
	int		whence)
{
	struct inode		*inode = file->f_mapping->host;

	if (XFS_FORCED_SHUTDOWN(XFS_I(inode)->i_mount))
		return -EIO;

	switch (whence) {
	default:
		return generic_file_llseek(file, offset, whence);
	case SEEK_HOLE:
		offset = iomap_seek_hole(inode, offset, &xfs_iomap_ops);
		break;
	case SEEK_DATA:
		offset = iomap_seek_data(inode, offset, &xfs_iomap_ops);
		break;
	}

	if (offset < 0)
		return offset;
	return vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
}

/*
 * Locking for serialisation of IO during page faults. This results in a lock
 * ordering of:
 *
 * mmap_sem (MM)
 *   sb_start_pagefault(vfs, freeze)
 *     i_mmaplock (XFS - truncate serialisation)
 *       page_lock (MM)
 *         i_lock (XFS - extent map serialisation)
 */
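/*
 * Common fault handler: takes MMAPLOCK_SHARED to serialise against truncate
 * and dispatches to the DAX, page_mkwrite or plain filemap paths.  Write
 * faults are additionally wrapped in sb_start_pagefault() for freeze
 * protection.
 */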
static int
__xfs_filemap_fault(
	struct vm_fault		*vmf,
	enum page_entry_size	pe_size,
	bool			write_fault)
{
	struct inode		*inode = file_inode(vmf->vma->vm_file);
	struct xfs_inode	*ip = XFS_I(inode);
	int			ret;

	trace_xfs_filemap_fault(ip, pe_size, write_fault);

	if (write_fault) {
		sb_start_pagefault(inode->i_sb);
		file_update_time(vmf->vma->vm_file);
	}

	xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
	if (IS_DAX(inode)) {
		ret = dax_iomap_fault(vmf, pe_size, &xfs_iomap_ops);
	} else {
		if (write_fault)
			ret = iomap_page_mkwrite(vmf, &xfs_iomap_ops);
		else
			ret = filemap_fault(vmf);
	}
	xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);

	if (write_fault)
		sb_end_pagefault(inode->i_sb);
	return ret;
}

static int
xfs_filemap_fault(
	struct vm_fault		*vmf)
{
	/* DAX can shortcut the normal fault path on write faults! */
	return __xfs_filemap_fault(vmf, PE_SIZE_PTE,
			IS_DAX(file_inode(vmf->vma->vm_file)) &&
			(vmf->flags & FAULT_FLAG_WRITE));
}

static int
xfs_filemap_huge_fault(
	struct vm_fault		*vmf,
	enum page_entry_size	pe_size)
{
	if (!IS_DAX(file_inode(vmf->vma->vm_file)))
		return VM_FAULT_FALLBACK;

	/* DAX can shortcut the normal fault path on write faults! */
	return __xfs_filemap_fault(vmf, pe_size,
			(vmf->flags & FAULT_FLAG_WRITE));
}

static int
xfs_filemap_page_mkwrite(
	struct vm_fault		*vmf)
{
	return __xfs_filemap_fault(vmf, PE_SIZE_PTE, true);
}

/*
 * pfn_mkwrite was originally intended to ensure we capture time stamp
 * updates on write faults. In reality, it's needed to serialise against
 * truncate similar to page_mkwrite. Hence we cycle the XFS_MMAPLOCK_SHARED
 * to ensure we serialise the fault barrier in place.
 */
static int
xfs_filemap_pfn_mkwrite(
	struct vm_fault		*vmf)
{
	struct inode		*inode = file_inode(vmf->vma->vm_file);
	struct xfs_inode	*ip = XFS_I(inode);
	int			ret = VM_FAULT_NOPAGE;
	loff_t			size;

	trace_xfs_filemap_pfn_mkwrite(ip);

	sb_start_pagefault(inode->i_sb);
	file_update_time(vmf->vma->vm_file);

	/* check if the faulting page hasn't raced with truncate */
	xfs_ilock(ip, XFS_MMAPLOCK_SHARED);
	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (vmf->pgoff >= size)
		ret = VM_FAULT_SIGBUS;
	else if (IS_DAX(inode))
		ret = dax_iomap_fault(vmf, PE_SIZE_PTE, &xfs_iomap_ops);
	xfs_iunlock(ip, XFS_MMAPLOCK_SHARED);
	sb_end_pagefault(inode->i_sb);
	return ret;
}

static const struct vm_operations_struct xfs_file_vm_ops = {
	.fault		= xfs_filemap_fault,
	.huge_fault	= xfs_filemap_huge_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= xfs_filemap_page_mkwrite,
	.pfn_mkwrite	= xfs_filemap_pfn_mkwrite,
};

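/*
 * DAX mappings require VM_MIXEDMAP and may use huge pages, so flag the vma
 * accordingly at mmap time.
 */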
STATIC int
xfs_file_mmap(
	struct file	*filp,
	struct vm_area_struct *vma)
{
	file_accessed(filp);
	vma->vm_ops = &xfs_file_vm_ops;
	if (IS_DAX(file_inode(filp)))
		vma->vm_flags |= VM_MIXEDMAP | VM_HUGEPAGE;
	return 0;
}

const struct file_operations xfs_file_operations = {
	.llseek		= xfs_file_llseek,
	.read_iter	= xfs_file_read_iter,
	.write_iter	= xfs_file_write_iter,
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
	.unlocked_ioctl	= xfs_file_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= xfs_file_compat_ioctl,
#endif
	.mmap		= xfs_file_mmap,
	.open		= xfs_file_open,
	.release	= xfs_file_release,
	.fsync		= xfs_file_fsync,
	.get_unmapped_area = thp_get_unmapped_area,
	.fallocate	= xfs_file_fallocate,
	.clone_file_range = xfs_file_clone_range,
	.dedupe_file_range = xfs_file_dedupe_range,
};

const struct file_operations xfs_dir_file_operations = {
	.open		= xfs_dir_open,
	.read		= generic_read_dir,
	.iterate_shared	= xfs_file_readdir,
	.llseek		= generic_file_llseek,
	.unlocked_ioctl	= xfs_file_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= xfs_file_compat_ioctl,
#endif
	.fsync		= xfs_dir_fsync,
};