// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_inode_item.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_dir2.h"
#include "xfs_dir2_priv.h"
#include "xfs_ioctl.h"
#include "xfs_trace.h"
#include "xfs_log.h"
#include "xfs_icache.h"
#include "xfs_pnfs.h"
#include "xfs_iomap.h"
#include "xfs_reflink.h"

#include <linux/falloc.h>
#include <linux/backing-dev.h>
#include <linux/mman.h>

static const struct vm_operations_struct xfs_file_vm_ops;

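/*
 * Set or clear the preallocation flag on an inode under a transaction.
 * Unless XFS_PREALLOC_INVISIBLE is set this also strips the setuid/setgid
 * bits and bumps the timestamps, just as a regular write would, and
 * XFS_PREALLOC_SYNC makes the transaction commit synchronous.
 */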
int
xfs_update_prealloc_flags(
	struct xfs_inode	*ip,
	enum xfs_prealloc_flags	flags)
{
	struct xfs_trans	*tp;
	int			error;

	error = xfs_trans_alloc(ip->i_mount, &M_RES(ip->i_mount)->tr_writeid,
			0, 0, 0, &tp);
	if (error)
		return error;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);

	if (!(flags & XFS_PREALLOC_INVISIBLE)) {
		VFS_I(ip)->i_mode &= ~S_ISUID;
		if (VFS_I(ip)->i_mode & S_IXGRP)
			VFS_I(ip)->i_mode &= ~S_ISGID;
		xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
	}

	if (flags & XFS_PREALLOC_SET)
		ip->i_d.di_flags |= XFS_DIFLAG_PREALLOC;
	if (flags & XFS_PREALLOC_CLEAR)
		ip->i_d.di_flags &= ~XFS_DIFLAG_PREALLOC;

	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
	if (flags & XFS_PREALLOC_SYNC)
		xfs_trans_set_sync(tp);
	return xfs_trans_commit(tp);
}

/*
 * Fsync operations on directories are much simpler than on regular files,
 * as there is no file data to flush, and thus also no need for explicit
 * cache flush operations, and there are no non-transaction metadata updates
 * on directories either.
 */
STATIC int
xfs_dir_fsync(
	struct file		*file,
	loff_t			start,
	loff_t			end,
	int			datasync)
{
	struct xfs_inode	*ip = XFS_I(file->f_mapping->host);
	struct xfs_mount	*mp = ip->i_mount;
	xfs_lsn_t		lsn = 0;

	trace_xfs_dir_fsync(ip);

	xfs_ilock(ip, XFS_ILOCK_SHARED);
	if (xfs_ipincount(ip))
		lsn = ip->i_itemp->ili_last_lsn;
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	if (!lsn)
		return 0;
	return xfs_log_force_lsn(mp, lsn, XFS_LOG_SYNC, NULL);
}

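/*
 * Regular file fsync: write back any dirty data, flush the volatile write
 * caches of the data devices where required, and force the log up to the
 * last LSN that modified the inode so the logged metadata is stable too.
 */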
STATIC int
xfs_file_fsync(
	struct file		*file,
	loff_t			start,
	loff_t			end,
	int			datasync)
{
	struct inode		*inode = file->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	int			error = 0;
	int			log_flushed = 0;
	xfs_lsn_t		lsn = 0;

	trace_xfs_file_fsync(ip);

	error = file_write_and_wait_range(file, start, end);
	if (error)
		return error;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	xfs_iflags_clear(ip, XFS_ITRUNCATED);

	/*
	 * If we have an RT and/or log subvolume we need to make sure to flush
	 * the write cache of the device used for file data first.  This is to
	 * ensure newly written file data makes it to disk before logging the
	 * new inode size in case of an extending write.
	 */
	if (XFS_IS_REALTIME_INODE(ip))
		xfs_blkdev_issue_flush(mp->m_rtdev_targp);
	else if (mp->m_logdev_targp != mp->m_ddev_targp)
		xfs_blkdev_issue_flush(mp->m_ddev_targp);

	/*
	 * All metadata updates are logged, which means that we just have to
	 * flush the log up to the latest LSN that touched the inode. If we have
	 * concurrent fsync/fdatasync() calls, we need them to all block on the
	 * log force before we clear the ili_fsync_fields field. This ensures
	 * that we don't get a racing sync operation that does not wait for the
	 * metadata to hit the journal before returning. If we race with
	 * clearing the ili_fsync_fields, then all that will happen is the log
	 * force will do nothing as the lsn will already be on disk. We can't
	 * race with setting ili_fsync_fields because that is done under
	 * XFS_ILOCK_EXCL, and that can't happen because we hold the lock shared
	 * until after the ili_fsync_fields is cleared.
	 */
	xfs_ilock(ip, XFS_ILOCK_SHARED);
	if (xfs_ipincount(ip)) {
		if (!datasync ||
		    (ip->i_itemp->ili_fsync_fields & ~XFS_ILOG_TIMESTAMP))
			lsn = ip->i_itemp->ili_last_lsn;
	}

	if (lsn) {
		error = xfs_log_force_lsn(mp, lsn, XFS_LOG_SYNC, &log_flushed);
		ip->i_itemp->ili_fsync_fields = 0;
	}
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	/*
	 * If we only have a single device, and the log force above was
	 * a no-op, we might have to flush the data device cache here.
	 * This can only happen for fdatasync/O_DSYNC if we were overwriting
	 * an already allocated file and thus do not have any metadata to
	 * commit.
	 */
	if (!log_flushed && !XFS_IS_REALTIME_INODE(ip) &&
	    mp->m_logdev_targp == mp->m_ddev_targp)
		xfs_blkdev_issue_flush(mp->m_ddev_targp);

	return error;
}

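/*
 * Direct I/O reads are issued under the shared iolock, which serialises
 * against truncate and other exclusive-iolock operations while still
 * allowing direct reads to run in parallel.
 */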
STATIC ssize_t
xfs_file_dio_aio_read(
	struct kiocb		*iocb,
	struct iov_iter		*to)
{
	struct xfs_inode	*ip = XFS_I(file_inode(iocb->ki_filp));
	size_t			count = iov_iter_count(to);
	ssize_t			ret;

	trace_xfs_file_direct_read(ip, count, iocb->ki_pos);

	if (!count)
		return 0; /* skip atime */

	file_accessed(iocb->ki_filp);

	xfs_ilock(ip, XFS_IOLOCK_SHARED);
	ret = iomap_dio_rw(iocb, to, &xfs_iomap_ops, NULL);
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);

	return ret;
}

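/*
 * DAX reads copy data directly from the backing store, again under the
 * shared iolock. IOCB_NOWAIT callers back off with -EAGAIN rather than
 * blocking on the lock.
 */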
static noinline ssize_t
xfs_file_dax_read(
	struct kiocb		*iocb,
	struct iov_iter		*to)
{
	struct xfs_inode	*ip = XFS_I(iocb->ki_filp->f_mapping->host);
	size_t			count = iov_iter_count(to);
	ssize_t			ret = 0;

	trace_xfs_file_dax_read(ip, count, iocb->ki_pos);

	if (!count)
		return 0; /* skip atime */

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (!xfs_ilock_nowait(ip, XFS_IOLOCK_SHARED))
			return -EAGAIN;
	} else {
		xfs_ilock(ip, XFS_IOLOCK_SHARED);
	}

	ret = dax_iomap_rw(iocb, to, &xfs_iomap_ops);
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);

	file_accessed(iocb->ki_filp);
	return ret;
}

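/*
 * Buffered reads go through the generic page cache path, wrapped in the
 * shared iolock for the same truncate serialisation as the other read
 * paths.
 */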
STATIC ssize_t
xfs_file_buffered_aio_read(
	struct kiocb		*iocb,
	struct iov_iter		*to)
{
	struct xfs_inode	*ip = XFS_I(file_inode(iocb->ki_filp));
	ssize_t			ret;

	trace_xfs_file_buffered_read(ip, iov_iter_count(to), iocb->ki_pos);

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (!xfs_ilock_nowait(ip, XFS_IOLOCK_SHARED))
			return -EAGAIN;
	} else {
		xfs_ilock(ip, XFS_IOLOCK_SHARED);
	}
	ret = generic_file_read_iter(iocb, to);
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);

	return ret;
}

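/*
 * Dispatch a read to the DAX, direct or buffered implementation and account
 * the bytes read on success.
 */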
STATIC ssize_t
xfs_file_read_iter(
	struct kiocb		*iocb,
	struct iov_iter		*to)
{
	struct inode		*inode = file_inode(iocb->ki_filp);
	struct xfs_mount	*mp = XFS_I(inode)->i_mount;
	ssize_t			ret = 0;

	XFS_STATS_INC(mp, xs_read_calls);

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	if (IS_DAX(inode))
		ret = xfs_file_dax_read(iocb, to);
	else if (iocb->ki_flags & IOCB_DIRECT)
		ret = xfs_file_dio_aio_read(iocb, to);
	else
		ret = xfs_file_buffered_aio_read(iocb, to);

	if (ret > 0)
		XFS_STATS_ADD(mp, xs_read_bytes, ret);
	return ret;
}

/*
 * Common pre-write limit and setup checks.
 *
 * Called with the iolock held either shared or exclusive according to
 * @iolock, and returns with it held.  Might upgrade the iolock to exclusive
 * if called for a direct write beyond i_size.
 */
STATIC ssize_t
xfs_file_aio_write_checks(
	struct kiocb		*iocb,
	struct iov_iter		*from,
	int			*iolock)
{
	struct file		*file = iocb->ki_filp;
	struct inode		*inode = file->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	ssize_t			error = 0;
	size_t			count = iov_iter_count(from);
	bool			drained_dio = false;
	loff_t			isize;

restart:
	error = generic_write_checks(iocb, from);
	if (error <= 0)
		return error;

	error = xfs_break_layouts(inode, iolock, BREAK_WRITE);
	if (error)
		return error;

	/*
	 * For changing security info in file_remove_privs() we need i_rwsem
	 * exclusively.
	 */
	if (*iolock == XFS_IOLOCK_SHARED && !IS_NOSEC(inode)) {
		xfs_iunlock(ip, *iolock);
		*iolock = XFS_IOLOCK_EXCL;
		xfs_ilock(ip, *iolock);
		goto restart;
	}
	/*
	 * If the offset is beyond the size of the file, we need to zero any
	 * blocks that fall between the existing EOF and the start of this
	 * write.  If zeroing is needed and we are currently holding the
	 * iolock shared, we need to update it to exclusive which implies
	 * having to redo all checks before.
	 *
	 * We need to serialise against EOF updates that occur in IO
	 * completions here. We want to make sure that nobody is changing the
	 * size while we do this check until we have placed an IO barrier (i.e.
	 * hold the XFS_IOLOCK_EXCL) that prevents new IO from being dispatched.
	 * The spinlock effectively forms a memory barrier once we have the
	 * XFS_IOLOCK_EXCL so we are guaranteed to see the latest EOF value
	 * and hence be able to correctly determine if we need to run zeroing.
	 */
	spin_lock(&ip->i_flags_lock);
	isize = i_size_read(inode);
	if (iocb->ki_pos > isize) {
		spin_unlock(&ip->i_flags_lock);
		if (!drained_dio) {
			if (*iolock == XFS_IOLOCK_SHARED) {
				xfs_iunlock(ip, *iolock);
				*iolock = XFS_IOLOCK_EXCL;
				xfs_ilock(ip, *iolock);
				iov_iter_reexpand(from, count);
			}
			/*
			 * We now have an IO submission barrier in place, but
			 * AIO can do EOF updates during IO completion and hence
			 * we now need to wait for all of them to drain. Non-AIO
			 * DIO will have drained before we are given the
			 * XFS_IOLOCK_EXCL, and so for most cases this wait is a
			 * no-op.
			 */
			inode_dio_wait(inode);
			drained_dio = true;
			goto restart;
		}

		trace_xfs_zero_eof(ip, isize, iocb->ki_pos - isize);
		error = iomap_zero_range(inode, isize, iocb->ki_pos - isize,
				NULL, &xfs_iomap_ops);
		if (error)
			return error;
	} else
		spin_unlock(&ip->i_flags_lock);

	/*
	 * Updating the timestamps will grab the ilock again from
	 * xfs_fs_dirty_inode, so we have to call it after dropping the
	 * lock above.  Eventually we should look into a way to avoid
	 * the pointless lock roundtrip.
	 */
	return file_modified(file);
}

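/*
 * Direct I/O write completion: finish any copy-on-write remapping or
 * unwritten extent conversion for the range just written, and if the write
 * extended the file, update the in-core and on-disk inode size.
 */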
static int
xfs_dio_write_end_io(
	struct kiocb		*iocb,
	ssize_t			size,
	unsigned		flags)
{
	struct inode		*inode = file_inode(iocb->ki_filp);
	struct xfs_inode	*ip = XFS_I(inode);
	loff_t			offset = iocb->ki_pos;
	unsigned int		nofs_flag;
	int			error = 0;

	trace_xfs_end_io_direct_write(ip, offset, size);

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return -EIO;

	if (size <= 0)
		return size;

	/*
	 * Capture amount written on completion as we can't reliably account
	 * for it on submission.
	 */
	XFS_STATS_ADD(ip->i_mount, xs_write_bytes, size);

	/*
	 * We can allocate memory here while doing writeback on behalf of
	 * memory reclaim.  To avoid memory allocation deadlocks set the
	 * task-wide nofs context for the following operations.
	 */
	nofs_flag = memalloc_nofs_save();

	if (flags & IOMAP_DIO_COW) {
		error = xfs_reflink_end_cow(ip, offset, size);
		if (error)
			goto out;
	}

	/*
	 * Unwritten conversion updates the in-core isize after extent
	 * conversion but before updating the on-disk size. Updating isize any
	 * earlier allows a racing dio read to find unwritten extents before
	 * they are converted.
	 */
	if (flags & IOMAP_DIO_UNWRITTEN) {
		error = xfs_iomap_write_unwritten(ip, offset, size, true);
		goto out;
	}

	/*
	 * We need to update the in-core inode size here so that we don't end up
	 * with the on-disk inode size being outside the in-core inode size. We
	 * have no other method of updating EOF for AIO, so always do it here
	 * if necessary.
	 *
	 * We need to lock the test/set EOF update as we can be racing with
	 * other IO completions here to update the EOF. Failing to serialise
	 * here can result in EOF moving backwards and Bad Things Happen when
	 * that occurs.
	 */
	spin_lock(&ip->i_flags_lock);
	if (offset + size > i_size_read(inode)) {
		i_size_write(inode, offset + size);
		spin_unlock(&ip->i_flags_lock);
		error = xfs_setfilesize(ip, offset, size);
	} else {
		spin_unlock(&ip->i_flags_lock);
	}

out:
	memalloc_nofs_restore(nofs_flag);
	return error;
}

/*
 * xfs_file_dio_aio_write - handle direct IO writes
 *
 * Lock the inode appropriately to prepare for and issue a direct IO write.
 * By separating it from the buffered write path we remove all the
 * tricky-to-follow locking changes and looping.
 *
 * If there are cached pages or we're extending the file, we need IOLOCK_EXCL
 * until we're sure the bytes at the new EOF have been zeroed and/or the cached
 * pages are flushed out.
 *
 * In most cases the direct IO writes will be done holding IOLOCK_SHARED
 * allowing them to be done in parallel with reads and other direct IO writes.
 * However, if the IO is not aligned to filesystem blocks, the direct IO layer
 * needs to do sub-block zeroing and that requires serialisation against other
 * direct IOs to the same block. In this case we need to serialise the
 * submission of the unaligned IOs so that we don't get racing block zeroing in
 * the dio layer. To avoid the problem with aio, we also need to wait for
 * outstanding IOs to complete so that unwritten extent conversion is completed
 * before we try to map the overlapping block. This is currently implemented by
 * hitting it with a big hammer (i.e. inode_dio_wait()).
 *
 * Returns with locks held indicated by @iolock and errors indicated by
 * negative return values.
 */
STATIC ssize_t
xfs_file_dio_aio_write(
	struct kiocb		*iocb,
	struct iov_iter		*from)
{
	struct file		*file = iocb->ki_filp;
	struct address_space	*mapping = file->f_mapping;
	struct inode		*inode = mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	ssize_t			ret = 0;
	int			unaligned_io = 0;
	int			iolock;
	size_t			count = iov_iter_count(from);
	struct xfs_buftarg	*target = XFS_IS_REALTIME_INODE(ip) ?
					mp->m_rtdev_targp : mp->m_ddev_targp;

	/* DIO must be aligned to device logical sector size */
	if ((iocb->ki_pos | count) & target->bt_logical_sectormask)
		return -EINVAL;

	/*
	 * Don't take the exclusive iolock here unless the I/O is unaligned to
	 * the file system block size.  We don't need to consider the EOF
	 * extension case here because xfs_file_aio_write_checks() will relock
	 * the inode as necessary for EOF zeroing cases and fill out the new
	 * inode size as appropriate.
	 */
	if ((iocb->ki_pos & mp->m_blockmask) ||
	    ((iocb->ki_pos + count) & mp->m_blockmask)) {
		unaligned_io = 1;

		/*
		 * We can't properly handle unaligned direct I/O to reflink
		 * files yet, as we can't unshare a partial block.
		 */
		if (xfs_is_cow_inode(ip)) {
			trace_xfs_reflink_bounce_dio_write(ip, iocb->ki_pos, count);
			return -EREMCHG;
		}
		iolock = XFS_IOLOCK_EXCL;
	} else {
		iolock = XFS_IOLOCK_SHARED;
	}

	if (iocb->ki_flags & IOCB_NOWAIT) {
		/* unaligned dio always waits, bail */
		if (unaligned_io)
			return -EAGAIN;
		if (!xfs_ilock_nowait(ip, iolock))
			return -EAGAIN;
	} else {
		xfs_ilock(ip, iolock);
	}

	ret = xfs_file_aio_write_checks(iocb, from, &iolock);
	if (ret)
		goto out;
	count = iov_iter_count(from);

	/*
	 * If we are doing unaligned IO, we can't allow any other overlapping IO
	 * in-flight at the same time or we risk data corruption. Wait for all
	 * other IO to drain before we submit. If the IO is aligned, demote the
	 * iolock if we had to take the exclusive lock in
	 * xfs_file_aio_write_checks() for other reasons.
	 */
	if (unaligned_io) {
		inode_dio_wait(inode);
	} else if (iolock == XFS_IOLOCK_EXCL) {
		xfs_ilock_demote(ip, XFS_IOLOCK_EXCL);
		iolock = XFS_IOLOCK_SHARED;
	}

	trace_xfs_file_direct_write(ip, count, iocb->ki_pos);
	ret = iomap_dio_rw(iocb, from, &xfs_iomap_ops, xfs_dio_write_end_io);

	/*
	 * If unaligned, this is the only IO in-flight. If it has not yet
	 * completed, wait on it before we release the iolock to prevent
	 * subsequent overlapping IO.
	 */
	if (ret == -EIOCBQUEUED && unaligned_io)
		inode_dio_wait(inode);
out:
	xfs_iunlock(ip, iolock);

	/*
	 * No fallback to buffered IO on errors for XFS, direct IO will either
	 * complete fully or fail.
	 */
	ASSERT(ret < 0 || ret == count);
	return ret;
}

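/*
 * DAX writes copy data directly to the backing store under the exclusive
 * iolock. As there is no I/O completion to defer to, extending writes
 * update the on-disk size before the lock is dropped.
 */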
static noinline ssize_t
xfs_file_dax_write(
	struct kiocb		*iocb,
	struct iov_iter		*from)
{
	struct inode		*inode = iocb->ki_filp->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	int			iolock = XFS_IOLOCK_EXCL;
	ssize_t			ret, error = 0;
	size_t			count;
	loff_t			pos;

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (!xfs_ilock_nowait(ip, iolock))
			return -EAGAIN;
	} else {
		xfs_ilock(ip, iolock);
	}

	ret = xfs_file_aio_write_checks(iocb, from, &iolock);
	if (ret)
		goto out;

	pos = iocb->ki_pos;
	count = iov_iter_count(from);

	trace_xfs_file_dax_write(ip, count, pos);
	ret = dax_iomap_rw(iocb, from, &xfs_iomap_ops);
	if (ret > 0 && iocb->ki_pos > i_size_read(inode)) {
		i_size_write(inode, iocb->ki_pos);
		error = xfs_setfilesize(ip, pos, ret);
	}
out:
	xfs_iunlock(ip, iolock);
	if (error)
		return error;

	if (ret > 0) {
		XFS_STATS_ADD(ip->i_mount, xs_write_bytes, ret);

		/* Handle various SYNC-type writes */
		ret = generic_write_sync(iocb, ret);
	}
	return ret;
}

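/*
 * Buffered writes go through the page cache under the exclusive iolock. On
 * EDQUOT or ENOSPC we try to free up lingering preallocated and CoW blocks
 * and retry the write once before giving up.
 */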
STATIC ssize_t
xfs_file_buffered_aio_write(
	struct kiocb		*iocb,
	struct iov_iter		*from)
{
	struct file		*file = iocb->ki_filp;
	struct address_space	*mapping = file->f_mapping;
	struct inode		*inode = mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	ssize_t			ret;
	int			enospc = 0;
	int			iolock;

	if (iocb->ki_flags & IOCB_NOWAIT)
		return -EOPNOTSUPP;

write_retry:
	iolock = XFS_IOLOCK_EXCL;
	xfs_ilock(ip, iolock);

	ret = xfs_file_aio_write_checks(iocb, from, &iolock);
	if (ret)
		goto out;

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = inode_to_bdi(inode);

	trace_xfs_file_buffered_write(ip, iov_iter_count(from), iocb->ki_pos);
	ret = iomap_file_buffered_write(iocb, from, &xfs_iomap_ops);
	if (likely(ret >= 0))
		iocb->ki_pos += ret;

	/*
	 * If we hit a space limit, try to free up some lingering preallocated
	 * space before returning an error. In the case of ENOSPC, first try to
	 * write back all dirty inodes to free up some of the excess reserved
	 * metadata space. This reduces the chances that the eofblocks scan
	 * waits on dirty mappings. Since xfs_flush_inodes() is serialized, this
	 * also behaves as a filter to prevent too many eofblocks scans from
	 * running at the same time.
	 */
	if (ret == -EDQUOT && !enospc) {
		xfs_iunlock(ip, iolock);
		enospc = xfs_inode_free_quota_eofblocks(ip);
		if (enospc)
			goto write_retry;
		enospc = xfs_inode_free_quota_cowblocks(ip);
		if (enospc)
			goto write_retry;
		iolock = 0;
	} else if (ret == -ENOSPC && !enospc) {
		struct xfs_eofblocks eofb = {0};

		enospc = 1;
		xfs_flush_inodes(ip->i_mount);

		xfs_iunlock(ip, iolock);
		eofb.eof_flags = XFS_EOF_FLAGS_SYNC;
		xfs_icache_free_eofblocks(ip->i_mount, &eofb);
		xfs_icache_free_cowblocks(ip->i_mount, &eofb);
		goto write_retry;
	}

	current->backing_dev_info = NULL;
out:
	if (iolock)
		xfs_iunlock(ip, iolock);

	if (ret > 0) {
		XFS_STATS_ADD(ip->i_mount, xs_write_bytes, ret);
		/* Handle various SYNC-type writes */
		ret = generic_write_sync(iocb, ret);
	}
	return ret;
}

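/*
 * Dispatch a write to the DAX, direct or buffered implementation. A direct
 * write may fall back to a buffered write, but only for a reflink CoW range
 * (signalled by -EREMCHG from the direct I/O path).
 */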
STATIC ssize_t
xfs_file_write_iter(
	struct kiocb		*iocb,
	struct iov_iter		*from)
{
	struct file		*file = iocb->ki_filp;
	struct address_space	*mapping = file->f_mapping;
	struct inode		*inode = mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	ssize_t			ret;
	size_t			ocount = iov_iter_count(from);

	XFS_STATS_INC(ip->i_mount, xs_write_calls);

	if (ocount == 0)
		return 0;

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return -EIO;

	if (IS_DAX(inode))
		return xfs_file_dax_write(iocb, from);

	if (iocb->ki_flags & IOCB_DIRECT) {
		/*
		 * Allow a directio write to fall back to a buffered
		 * write *only* in the case that we're doing a reflink
		 * CoW.  In all other directio scenarios we do not
		 * allow an operation to fall back to buffered mode.
		 */
		ret = xfs_file_dio_aio_write(iocb, from);
		if (ret != -EREMCHG)
			return ret;
	}

	return xfs_file_buffered_aio_write(iocb, from);
}

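/*
 * Drop the mmap lock while we sleep waiting for a busy DAX page, then take
 * it again so that the layout break loop can revalidate the mapping.
 */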
static void
xfs_wait_dax_page(
	struct inode		*inode)
{
	struct xfs_inode	*ip = XFS_I(inode);

	xfs_iunlock(ip, XFS_MMAPLOCK_EXCL);
	schedule();
	xfs_ilock(ip, XFS_MMAPLOCK_EXCL);
}

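/*
 * Wait for any page of this inode that is still pinned through a DAX
 * mapping (i.e. has an elevated refcount) to become idle. Sets *retry if
 * we had to sleep, so the caller knows to revalidate.
 */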
static int
xfs_break_dax_layouts(
	struct inode		*inode,
	bool			*retry)
{
	struct page		*page;

	ASSERT(xfs_isilocked(XFS_I(inode), XFS_MMAPLOCK_EXCL));

	page = dax_layout_busy_page(inode->i_mapping);
	if (!page)
		return 0;

	*retry = true;
	return ___wait_var_event(&page->_refcount,
			atomic_read(&page->_refcount) == 1, TASK_INTERRUPTIBLE,
			0, 0, xfs_wait_dax_page(inode));
}

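/*
 * Break any leased or DAX layouts on this inode ahead of an operation that
 * invalidates them. BREAK_UNMAP additionally waits for busy DAX pages to
 * become idle; both cases loop until no more waiting is required.
 */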
int
xfs_break_layouts(
	struct inode		*inode,
	uint			*iolock,
	enum layout_break_reason reason)
{
	bool			retry;
	int			error;

	ASSERT(xfs_isilocked(XFS_I(inode), XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL));

	do {
		retry = false;
		switch (reason) {
		case BREAK_UNMAP:
			error = xfs_break_dax_layouts(inode, &retry);
			if (error || retry)
				break;
			/* fall through */
		case BREAK_WRITE:
			error = xfs_break_leased_layouts(inode, iolock, &retry);
			break;
		default:
			WARN_ON_ONCE(1);
			error = -EINVAL;
		}
	} while (error == 0 && retry);

	return error;
}

#define XFS_FALLOC_FL_SUPPORTED						\
		(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |		\
		 FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE |	\
		 FALLOC_FL_INSERT_RANGE | FALLOC_FL_UNSHARE_RANGE)

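/*
 * fallocate() implementation: punch, zero, collapse, insert, unshare or
 * preallocate the requested range, breaking layouts and leases first and
 * updating the file size where the mode calls for it.
 */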
STATIC long
xfs_file_fallocate(
	struct file		*file,
	int			mode,
	loff_t			offset,
	loff_t			len)
{
	struct inode		*inode = file_inode(file);
	struct xfs_inode	*ip = XFS_I(inode);
	long			error;
	enum xfs_prealloc_flags	flags = 0;
	uint			iolock = XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL;
	loff_t			new_size = 0;
	bool			do_file_insert = false;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;
	if (mode & ~XFS_FALLOC_FL_SUPPORTED)
		return -EOPNOTSUPP;

	xfs_ilock(ip, iolock);
	error = xfs_break_layouts(inode, &iolock, BREAK_UNMAP);
	if (error)
		goto out_unlock;

	if (mode & FALLOC_FL_PUNCH_HOLE) {
		error = xfs_free_file_space(ip, offset, len);
		if (error)
			goto out_unlock;
	} else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
		unsigned int blksize_mask = i_blocksize(inode) - 1;

		if (offset & blksize_mask || len & blksize_mask) {
			error = -EINVAL;
			goto out_unlock;
		}

		/*
		 * A collapse range must not overlap EOF, as in that case it
		 * would effectively be a truncate operation.
		 */
		if (offset + len >= i_size_read(inode)) {
			error = -EINVAL;
			goto out_unlock;
		}

		new_size = i_size_read(inode) - len;

		error = xfs_collapse_file_space(ip, offset, len);
		if (error)
			goto out_unlock;
	} else if (mode & FALLOC_FL_INSERT_RANGE) {
		unsigned int	blksize_mask = i_blocksize(inode) - 1;
		loff_t		isize = i_size_read(inode);

		if (offset & blksize_mask || len & blksize_mask) {
			error = -EINVAL;
			goto out_unlock;
		}

		/*
		 * New inode size must not exceed ->s_maxbytes, accounting for
		 * possible signed overflow.
		 */
		if (inode->i_sb->s_maxbytes - isize < len) {
			error = -EFBIG;
			goto out_unlock;
		}
		new_size = isize + len;

		/* Offset should be less than i_size */
		if (offset >= isize) {
			error = -EINVAL;
			goto out_unlock;
		}
		do_file_insert = true;
	} else {
		flags |= XFS_PREALLOC_SET;

		if (!(mode & FALLOC_FL_KEEP_SIZE) &&
		    offset + len > i_size_read(inode)) {
			new_size = offset + len;
			error = inode_newsize_ok(inode, new_size);
			if (error)
				goto out_unlock;
		}

		if (mode & FALLOC_FL_ZERO_RANGE) {
			error = xfs_zero_file_space(ip, offset, len);
		} else if (mode & FALLOC_FL_UNSHARE_RANGE) {
			error = xfs_reflink_unshare(ip, offset, len);
			if (error)
				goto out_unlock;

			if (!xfs_is_always_cow_inode(ip)) {
				error = xfs_alloc_file_space(ip, offset, len,
						XFS_BMAPI_PREALLOC);
			}
		} else {
			/*
			 * In always_cow mode we can't use preallocations and
			 * thus should not create them.
			 */
			if (xfs_is_always_cow_inode(ip)) {
				error = -EOPNOTSUPP;
				goto out_unlock;
			}

			error = xfs_alloc_file_space(ip, offset, len,
					XFS_BMAPI_PREALLOC);
		}
		if (error)
			goto out_unlock;
	}

	if (file->f_flags & O_DSYNC)
		flags |= XFS_PREALLOC_SYNC;

	error = xfs_update_prealloc_flags(ip, flags);
	if (error)
		goto out_unlock;

	/* Change file size if needed */
	if (new_size) {
		struct iattr iattr;

		iattr.ia_valid = ATTR_SIZE;
		iattr.ia_size = new_size;
		error = xfs_vn_setattr_size(file_dentry(file), &iattr);
		if (error)
			goto out_unlock;
	}

	/*
	 * Perform hole insertion now that the file size has been updated so
	 * that if we crash during the operation we don't leave shifted
	 * extents past EOF and hence lose access to the data that is
	 * contained within them.
	 */
	if (do_file_insert)
		error = xfs_insert_file_space(ip, offset, len);

out_unlock:
	xfs_iunlock(ip, iolock);
	return error;
}

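/*
 * Remap (reflink/dedupe) blocks from one file to another, carrying the CoW
 * extent size hint over from source to destination when the whole source
 * file is shared and only the source has a hint set.
 */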
STATIC loff_t
xfs_file_remap_range(
	struct file		*file_in,
	loff_t			pos_in,
	struct file		*file_out,
	loff_t			pos_out,
	loff_t			len,
	unsigned int		remap_flags)
{
	struct inode		*inode_in = file_inode(file_in);
	struct xfs_inode	*src = XFS_I(inode_in);
	struct inode		*inode_out = file_inode(file_out);
	struct xfs_inode	*dest = XFS_I(inode_out);
	struct xfs_mount	*mp = src->i_mount;
	loff_t			remapped = 0;
	xfs_extlen_t		cowextsize;
	int			ret;

	if (remap_flags & ~(REMAP_FILE_DEDUP | REMAP_FILE_ADVISORY))
		return -EINVAL;

	if (!xfs_sb_version_hasreflink(&mp->m_sb))
		return -EOPNOTSUPP;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	/* Prepare and then clone file data. */
	ret = xfs_reflink_remap_prep(file_in, pos_in, file_out, pos_out,
			&len, remap_flags);
	if (ret < 0 || len == 0)
		return ret;

	trace_xfs_reflink_remap_range(src, pos_in, len, dest, pos_out);

	ret = xfs_reflink_remap_blocks(src, pos_in, dest, pos_out, len,
			&remapped);
	if (ret)
		goto out_unlock;

	/*
	 * Carry the cowextsize hint from src to dest if we're sharing the
	 * entire source file to the entire destination file, the source file
	 * has a cowextsize hint, and the destination file does not.
	 */
	cowextsize = 0;
	if (pos_in == 0 && len == i_size_read(inode_in) &&
	    (src->i_d.di_flags2 & XFS_DIFLAG2_COWEXTSIZE) &&
	    pos_out == 0 && len >= i_size_read(inode_out) &&
	    !(dest->i_d.di_flags2 & XFS_DIFLAG2_COWEXTSIZE))
		cowextsize = src->i_d.di_cowextsize;

	ret = xfs_reflink_update_dest(dest, pos_out + len, cowextsize,
			remap_flags);

out_unlock:
	xfs_reflink_remap_unlock(file_in, file_out);
	if (ret)
		trace_xfs_reflink_remap_range_error(dest, ret, _RET_IP_);
	return remapped > 0 ? remapped : ret;
}

STATIC int
xfs_file_open(
	struct inode	*inode,
	struct file	*file)
{
	if (!(file->f_flags & O_LARGEFILE) && i_size_read(inode) > MAX_NON_LFS)
		return -EFBIG;
	if (XFS_FORCED_SHUTDOWN(XFS_M(inode->i_sb)))
		return -EIO;
	file->f_mode |= FMODE_NOWAIT;
	return 0;
}

STATIC int
xfs_dir_open(
	struct inode	*inode,
	struct file	*file)
{
	struct xfs_inode *ip = XFS_I(inode);
	int		mode;
	int		error;

	error = xfs_file_open(inode, file);
	if (error)
		return error;

	/*
	 * If there are any blocks, read-ahead block 0 as we're almost
	 * certain to have the next operation be a read there.
	 */
	mode = xfs_ilock_data_map_shared(ip);
	if (ip->i_d.di_nextents > 0)
		error = xfs_dir3_data_readahead(ip, 0, -1);
	xfs_iunlock(ip, mode);
	return error;
}

STATIC int
xfs_file_release(
	struct inode	*inode,
	struct file	*filp)
{
	return xfs_release(XFS_I(inode));
}

STATIC int
xfs_file_readdir(
	struct file	*file,
	struct dir_context *ctx)
{
	struct inode	*inode = file_inode(file);
	xfs_inode_t	*ip = XFS_I(inode);
	size_t		bufsize;

	/*
	 * The Linux API doesn't pass the total size of the buffer we read
	 * into down to the filesystem.  With the filldir concept it's not
	 * needed for correct information, but the XFS dir2 leaf code wants
	 * an estimate of the buffer size to calculate its readahead window
	 * and size the buffers used for mapping to physical blocks.
	 *
	 * Try to give it an estimate that's good enough, maybe at some
	 * point we can change the ->readdir prototype to include the
	 * buffer size.  For now we use the current glibc buffer size.
	 */
	bufsize = (size_t)min_t(loff_t, XFS_READDIR_BUFSIZE, ip->i_d.di_size);

	return xfs_readdir(NULL, ip, ctx, bufsize);
}

STATIC loff_t
xfs_file_llseek(
	struct file	*file,
	loff_t		offset,
	int		whence)
{
	struct inode		*inode = file->f_mapping->host;

	if (XFS_FORCED_SHUTDOWN(XFS_I(inode)->i_mount))
		return -EIO;

	switch (whence) {
	default:
		return generic_file_llseek(file, offset, whence);
	case SEEK_HOLE:
		offset = iomap_seek_hole(inode, offset, &xfs_seek_iomap_ops);
		break;
	case SEEK_DATA:
		offset = iomap_seek_data(inode, offset, &xfs_seek_iomap_ops);
		break;
	}

	if (offset < 0)
		return offset;
	return vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
}

/*
 * Locking for serialisation of IO during page faults. This results in a lock
 * ordering of:
 *
 * mmap_sem (MM)
 *   sb_start_pagefault(vfs, freeze)
 *     i_mmaplock (XFS - truncate serialisation)
 *       page_lock (MM)
 *         i_lock (XFS - extent map serialisation)
 */
static vm_fault_t
__xfs_filemap_fault(
	struct vm_fault		*vmf,
	enum page_entry_size	pe_size,
	bool			write_fault)
{
	struct inode		*inode = file_inode(vmf->vma->vm_file);
	struct xfs_inode	*ip = XFS_I(inode);
	vm_fault_t		ret;

	trace_xfs_filemap_fault(ip, pe_size, write_fault);

	if (write_fault) {
		sb_start_pagefault(inode->i_sb);
		file_update_time(vmf->vma->vm_file);
	}

	xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
	if (IS_DAX(inode)) {
		pfn_t pfn;

		ret = dax_iomap_fault(vmf, pe_size, &pfn, NULL, &xfs_iomap_ops);
		if (ret & VM_FAULT_NEEDDSYNC)
			ret = dax_finish_sync_fault(vmf, pe_size, pfn);
	} else {
		if (write_fault)
			ret = iomap_page_mkwrite(vmf, &xfs_iomap_ops);
		else
			ret = filemap_fault(vmf);
	}
	xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);

	if (write_fault)
		sb_end_pagefault(inode->i_sb);
	return ret;
}

static vm_fault_t
xfs_filemap_fault(
	struct vm_fault		*vmf)
{
	/* DAX can shortcut the normal fault path on write faults! */
	return __xfs_filemap_fault(vmf, PE_SIZE_PTE,
			IS_DAX(file_inode(vmf->vma->vm_file)) &&
			(vmf->flags & FAULT_FLAG_WRITE));
}

static vm_fault_t
xfs_filemap_huge_fault(
	struct vm_fault		*vmf,
	enum page_entry_size	pe_size)
{
	if (!IS_DAX(file_inode(vmf->vma->vm_file)))
		return VM_FAULT_FALLBACK;

	/* DAX can shortcut the normal fault path on write faults! */
	return __xfs_filemap_fault(vmf, pe_size,
			(vmf->flags & FAULT_FLAG_WRITE));
}

static vm_fault_t
xfs_filemap_page_mkwrite(
	struct vm_fault		*vmf)
{
	return __xfs_filemap_fault(vmf, PE_SIZE_PTE, true);
}

/*
 * pfn_mkwrite was originally intended to ensure we capture time stamp updates
 * on write faults. In reality, it needs to serialise against truncate and
 * prepare memory for writing, so handle it as a standard write fault.
 */
static vm_fault_t
xfs_filemap_pfn_mkwrite(
	struct vm_fault		*vmf)
{
	return __xfs_filemap_fault(vmf, PE_SIZE_PTE, true);
}

static const struct vm_operations_struct xfs_file_vm_ops = {
	.fault		= xfs_filemap_fault,
	.huge_fault	= xfs_filemap_huge_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= xfs_filemap_page_mkwrite,
	.pfn_mkwrite	= xfs_filemap_pfn_mkwrite,
};

STATIC int
xfs_file_mmap(
	struct file	*filp,
	struct vm_area_struct *vma)
{
	/*
	 * We don't support synchronous mappings for non-DAX files. At least
	 * until someone comes up with a sensible use case.
	 */
	if (!IS_DAX(file_inode(filp)) && (vma->vm_flags & VM_SYNC))
		return -EOPNOTSUPP;

	file_accessed(filp);
	vma->vm_ops = &xfs_file_vm_ops;
	if (IS_DAX(file_inode(filp)))
		vma->vm_flags |= VM_HUGEPAGE;
	return 0;
}

const struct file_operations xfs_file_operations = {
	.llseek		= xfs_file_llseek,
	.read_iter	= xfs_file_read_iter,
	.write_iter	= xfs_file_write_iter,
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
	.iopoll		= iomap_dio_iopoll,
	.unlocked_ioctl	= xfs_file_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= xfs_file_compat_ioctl,
#endif
	.mmap		= xfs_file_mmap,
	.mmap_supported_flags = MAP_SYNC,
	.open		= xfs_file_open,
	.release	= xfs_file_release,
	.fsync		= xfs_file_fsync,
	.get_unmapped_area = thp_get_unmapped_area,
	.fallocate	= xfs_file_fallocate,
	.remap_file_range = xfs_file_remap_range,
};

const struct file_operations xfs_dir_file_operations = {
	.open		= xfs_dir_open,
	.read		= generic_read_dir,
	.iterate_shared	= xfs_file_readdir,
	.llseek		= generic_file_llseek,
	.unlocked_ioctl	= xfs_file_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= xfs_file_compat_ioctl,
#endif
	.fsync		= xfs_dir_fsync,
};