// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/ext4/file.c
 *
 *  Copyright (C) 1992, 1993, 1994, 1995
 *  Remy Card (card@masi.ibp.fr)
 *  Laboratoire MASI - Institut Blaise Pascal
 *  Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/file.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  ext4 fs regular file handling primitives
 *
 *  64-bit file support on 64-bit platforms by Jakub Jelinek
 *	(jj@sunsite.ms.mff.cuni.cz)
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/mount.h>
#include <linux/path.h>
#include <linux/dax.h>
#include <linux/quotaops.h>
#include <linux/pagevec.h>
#include <linux/uio.h>
#include <linux/mman.h>
#include <linux/backing-dev.h>
#include "ext4.h"
#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"
#include "truncate.h"

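/*
 * Returns true if the given inode can use the iomap direct I/O path.
 * Encrypted, verity, data-journalled and inline-data inodes all need the
 * page cache, so their direct I/O requests fall back to buffered I/O.
 */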
static bool ext4_dio_supported(struct inode *inode)
{
	if (IS_ENABLED(CONFIG_FS_ENCRYPTION) && IS_ENCRYPTED(inode))
		return false;
	if (fsverity_active(inode))
		return false;
	if (ext4_should_journal_data(inode))
		return false;
	if (ext4_has_inline_data(inode))
		return false;
	return true;
}

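/*
 * Direct I/O read path. The inode lock is taken shared; with IOCB_NOWAIT
 * we only trylock and return -EAGAIN on contention. If the inode cannot
 * use direct I/O at all, IOCB_DIRECT is cleared and the read is completed
 * through generic_file_read_iter() instead.
 */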
static ssize_t ext4_dio_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	ssize_t ret;
	struct inode *inode = file_inode(iocb->ki_filp);

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (!inode_trylock_shared(inode))
			return -EAGAIN;
	} else {
		inode_lock_shared(inode);
	}

	if (!ext4_dio_supported(inode)) {
		inode_unlock_shared(inode);
		/*
		 * Fall back to buffered I/O if the operation being performed
		 * on the inode is not supported by direct I/O. The IOCB_DIRECT
		 * flag needs to be cleared here in order to ensure that the
		 * direct I/O path within generic_file_read_iter() is not
		 * taken.
		 */
		iocb->ki_flags &= ~IOCB_DIRECT;
		return generic_file_read_iter(iocb, to);
	}

	ret = iomap_dio_rw(iocb, to, &ext4_iomap_ops, NULL,
			   is_sync_kiocb(iocb));
	inode_unlock_shared(inode);

	file_accessed(iocb->ki_filp);
	return ret;
}

#ifdef CONFIG_FS_DAX
static ssize_t ext4_dax_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (!inode_trylock_shared(inode))
			return -EAGAIN;
	} else {
		inode_lock_shared(inode);
	}
	/*
	 * Recheck under inode lock - at this point we are sure it cannot
	 * change anymore
	 */
	if (!IS_DAX(inode)) {
		inode_unlock_shared(inode);
		/* Fall back to buffered IO in case we cannot support DAX */
		return generic_file_read_iter(iocb, to);
	}
	ret = dax_iomap_rw(iocb, to, &ext4_iomap_ops);
	inode_unlock_shared(inode);

	file_accessed(iocb->ki_filp);
	return ret;
}
#endif

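/*
 * Top-level ->read_iter() dispatcher: DAX inodes go through the DAX iomap
 * path, IOCB_DIRECT requests through the direct I/O path, and everything
 * else through generic buffered reads.
 */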
static ssize_t ext4_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *inode = file_inode(iocb->ki_filp);

	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
		return -EIO;

	if (!iov_iter_count(to))
		return 0; /* skip atime */

#ifdef CONFIG_FS_DAX
	if (IS_DAX(inode))
		return ext4_dax_read_iter(iocb, to);
#endif
	if (iocb->ki_flags & IOCB_DIRECT)
		return ext4_dio_read_iter(iocb, to);

	return generic_file_read_iter(iocb, to);
}

/*
 * Called when an inode is released. Note that this is different
 * from ext4_file_open: open gets called at every open, but release
 * gets called only when /all/ the files are closed.
 */
static int ext4_release_file(struct inode *inode, struct file *filp)
{
	if (ext4_test_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE)) {
		ext4_alloc_da_blocks(inode);
		ext4_clear_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE);
	}
	/* if we are the last writer on the inode, drop the block reservation */
	if ((filp->f_mode & FMODE_WRITE) &&
			(atomic_read(&inode->i_writecount) == 1) &&
			!EXT4_I(inode)->i_reserved_data_blocks) {
		down_write(&EXT4_I(inode)->i_data_sem);
		ext4_discard_preallocations(inode);
		up_write(&EXT4_I(inode)->i_data_sem);
	}
	if (is_dx(inode) && filp->private_data)
		ext4_htree_free_dir_info(filp->private_data);

	return 0;
}

/*
 * This tests whether the IO in question is block-aligned or not.
 * Ext4 utilizes unwritten extents when hole-filling during direct IO, and they
 * are converted to written only after the IO is complete. Until they are
 * mapped, these blocks appear as holes, so dio_zero_block() will assume that
 * it needs to zero out portions of the start and/or end block. If 2 AIO
 * threads are at work on the same unwritten block, they must be synchronized
 * or one thread will zero the other's data, causing corruption.
 */
static bool
ext4_unaligned_io(struct inode *inode, struct iov_iter *from, loff_t pos)
{
	struct super_block *sb = inode->i_sb;
	unsigned long blockmask = sb->s_blocksize - 1;

	if ((pos | iov_iter_alignment(from)) & blockmask)
		return true;

	return false;
}

static bool
ext4_extending_io(struct inode *inode, loff_t offset, size_t len)
{
	if (offset + len > i_size_read(inode) ||
	    offset + len > EXT4_I(inode)->i_disksize)
		return true;
	return false;
}

/* Is IO overwriting allocated and initialized blocks? */
static bool ext4_overwrite_io(struct inode *inode, loff_t pos, loff_t len)
{
	struct ext4_map_blocks map;
	unsigned int blkbits = inode->i_blkbits;
	int err, blklen;

	if (pos + len > i_size_read(inode))
		return false;

	map.m_lblk = pos >> blkbits;
	map.m_len = EXT4_MAX_BLOCKS(len, pos, blkbits);
	blklen = map.m_len;

	err = ext4_map_blocks(NULL, inode, &map, 0);
	/*
	 * 'err == blklen' means that all of the blocks have been preallocated,
	 * regardless of whether they have been initialized or not. To exclude
	 * unwritten extents, we need to check m_flags.
	 */
	return err == blklen && (map.m_flags & EXT4_MAP_MAPPED);
}

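/*
 * Common write checks: reject writes to immutable inodes, run the VFS
 * generic_write_checks(), and clamp writes to bitmap-format (non-extent)
 * files to s_bitmap_maxbytes. Returns the number of bytes that may be
 * written, or a negative error.
 */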
static ssize_t ext4_generic_write_checks(struct kiocb *iocb,
					 struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	if (unlikely(IS_IMMUTABLE(inode)))
		return -EPERM;

	ret = generic_write_checks(iocb, from);
	if (ret <= 0)
		return ret;

	/*
	 * If we have encountered a bitmap-format file, the size limit
	 * is smaller than s_maxbytes, which is for extent-mapped files.
	 */
	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
		struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);

		if (iocb->ki_pos >= sbi->s_bitmap_maxbytes)
			return -EFBIG;
		iov_iter_truncate(from, sbi->s_bitmap_maxbytes - iocb->ki_pos);
	}

	return iov_iter_count(from);
}

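/*
 * Like ext4_generic_write_checks(), but additionally calls file_modified()
 * to update timestamps and strip privileges (suid/sgid bits, capabilities).
 */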
static ssize_t ext4_write_checks(struct kiocb *iocb, struct iov_iter *from)
{
	ssize_t ret, count;

	count = ext4_generic_write_checks(iocb, from);
	if (count <= 0)
		return count;

	ret = file_modified(iocb->ki_filp);
	if (ret)
		return ret;
	return count;
}

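/*
 * Buffered write path. Takes the inode lock exclusively and writes through
 * the page cache with generic_perform_write(). IOCB_NOWAIT is not supported
 * here since buffered writes can block in several places.
 */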
static ssize_t ext4_buffered_write_iter(struct kiocb *iocb,
					struct iov_iter *from)
{
	ssize_t ret;
	struct inode *inode = file_inode(iocb->ki_filp);

	if (iocb->ki_flags & IOCB_NOWAIT)
		return -EOPNOTSUPP;

	inode_lock(inode);
	ret = ext4_write_checks(iocb, from);
	if (ret <= 0)
		goto out;

	current->backing_dev_info = inode_to_bdi(inode);
	ret = generic_perform_write(iocb->ki_filp, from, iocb->ki_pos);
	current->backing_dev_info = NULL;

out:
	inode_unlock(inode);
	if (likely(ret > 0)) {
		iocb->ki_pos += ret;
		ret = generic_write_sync(iocb, ret);
	}

	return ret;
}

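/*
 * Finish off a write that may have extended the inode. Updates i_size /
 * i_disksize as needed, drops the inode from the orphan list once the
 * extension is safely recorded, and truncates any blocks that were
 * allocated beyond the bytes actually written (e.g. after a short write
 * or an error).
 */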
static ssize_t ext4_handle_inode_extension(struct inode *inode, loff_t offset,
					   ssize_t written, size_t count)
{
	handle_t *handle;
	bool truncate = false;
	u8 blkbits = inode->i_blkbits;
	ext4_lblk_t written_blk, end_blk;

	/*
	 * Note that EXT4_I(inode)->i_disksize can get extended up to
	 * inode->i_size while the I/O was running due to writeback of delalloc
	 * blocks. But, the code in ext4_iomap_alloc() is careful to use
	 * zeroed/unwritten extents if this is possible; thus we won't leave
	 * uninitialized blocks in a file even if we didn't succeed in writing
	 * as much as we intended.
	 */
	WARN_ON_ONCE(i_size_read(inode) < EXT4_I(inode)->i_disksize);
	if (offset + count <= EXT4_I(inode)->i_disksize) {
		/*
		 * We need to ensure that the inode is removed from the orphan
		 * list if it has been added prematurely, due to writeback of
		 * delalloc blocks.
		 */
		if (!list_empty(&EXT4_I(inode)->i_orphan) && inode->i_nlink) {
			handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);

			if (IS_ERR(handle)) {
				ext4_orphan_del(NULL, inode);
				return PTR_ERR(handle);
			}

			ext4_orphan_del(handle, inode);
			ext4_journal_stop(handle);
		}

		return written;
	}

	if (written < 0)
		goto truncate;

	handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
	if (IS_ERR(handle)) {
		written = PTR_ERR(handle);
		goto truncate;
	}

	if (ext4_update_inode_size(inode, offset + written))
		ext4_mark_inode_dirty(handle, inode);

	/*
	 * We may need to truncate allocated but not written blocks beyond EOF.
	 */
	written_blk = ALIGN(offset + written, 1 << blkbits);
	end_blk = ALIGN(offset + count, 1 << blkbits);
	if (written_blk < end_blk && ext4_can_truncate(inode))
		truncate = true;

	/*
	 * Remove the inode from the orphan list if it has been extended and
	 * everything went OK.
	 */
	if (!truncate && inode->i_nlink)
		ext4_orphan_del(handle, inode);
	ext4_journal_stop(handle);

	if (truncate) {
truncate:
		ext4_truncate_failed_write(inode);
		/*
		 * If the truncate operation failed early, then the inode may
		 * still be on the orphan list. In that case, we need to try
		 * to remove the inode from the in-memory linked list.
		 */
		if (inode->i_nlink)
			ext4_orphan_del(NULL, inode);
	}

	return written;
}

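/*
 * iomap direct I/O completion handler: if the write landed in unwritten
 * extents, convert them to written so the new data becomes visible.
 */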
static int ext4_dio_write_end_io(struct kiocb *iocb, ssize_t size,
				 int error, unsigned int flags)
{
	loff_t offset = iocb->ki_pos;
	struct inode *inode = file_inode(iocb->ki_filp);

	if (error)
		return error;

	if (size && flags & IOMAP_DIO_UNWRITTEN)
		return ext4_convert_unwritten_extents(NULL, inode,
						      offset, size);

	return 0;
}

static const struct iomap_dio_ops ext4_dio_write_ops = {
	.end_io = ext4_dio_write_end_io,
};

/*
 * The intention here is to start with a shared lock acquired, then see if any
 * condition requires an exclusive inode lock. If yes, then we restart the
 * whole operation by releasing the shared lock and acquiring the exclusive
 * lock.
 *
 * - For unaligned IO we never take the shared lock, as it may cause data
 *   corruption when two unaligned IOs try to modify the same block, e.g.
 *   while zeroing.
 *
 * - For extending writes we don't take the shared lock either, since they
 *   require updating inode i_disksize and/or orphan handling under the
 *   exclusive lock.
 *
 * - Shared locking will thus mostly be used for overwrites; otherwise we
 *   switch to the exclusive i_rwsem lock.
 */
static ssize_t ext4_dio_write_checks(struct kiocb *iocb, struct iov_iter *from,
				     bool *ilock_shared, bool *extend)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	loff_t offset;
	size_t count;
	ssize_t ret;

restart:
	ret = ext4_generic_write_checks(iocb, from);
	if (ret <= 0)
		goto out;

	offset = iocb->ki_pos;
	count = ret;
	if (ext4_extending_io(inode, offset, count))
		*extend = true;
	/*
	 * Determine whether the IO operation will overwrite allocated
	 * and initialized blocks.
	 * We need exclusive i_rwsem for changing security info
	 * in file_modified().
	 */
	if (*ilock_shared && (!IS_NOSEC(inode) || *extend ||
	     !ext4_overwrite_io(inode, offset, count))) {
		inode_unlock_shared(inode);
		*ilock_shared = false;
		inode_lock(inode);
		goto restart;
	}

	ret = file_modified(file);
	if (ret < 0)
		goto out;

	return count;
out:
	if (*ilock_shared)
		inode_unlock_shared(inode);
	else
		inode_unlock(inode);
	return ret;
}

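/*
 * Direct I/O write path. The lock mode is chosen up front: unaligned IO
 * and (apparently) extending IO take the exclusive inode lock, everything
 * else starts shared and lets ext4_dio_write_checks() escalate if needed.
 * Extending writes are parked on the orphan list so a crash mid-write
 * cannot leave allocated blocks exposed past i_size, and a short direct
 * write is completed through the buffered path.
 */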
static ssize_t ext4_dio_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	ssize_t ret;
	handle_t *handle;
	struct inode *inode = file_inode(iocb->ki_filp);
	loff_t offset = iocb->ki_pos;
	size_t count = iov_iter_count(from);
	const struct iomap_ops *iomap_ops = &ext4_iomap_ops;
	bool extend = false, unaligned_io = false;
	bool ilock_shared = true;

	/*
	 * We initially start with a shared inode lock unless it is
	 * unaligned IO, which needs an exclusive lock anyway.
	 */
	if (ext4_unaligned_io(inode, from, offset)) {
		unaligned_io = true;
		ilock_shared = false;
	}
	/*
	 * Quick check here without any i_rwsem lock to see if it is extending
	 * IO. A more reliable check is done in ext4_dio_write_checks() with
	 * proper locking in place.
	 */
	if (offset + count > i_size_read(inode))
		ilock_shared = false;

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (ilock_shared) {
			if (!inode_trylock_shared(inode))
				return -EAGAIN;
		} else {
			if (!inode_trylock(inode))
				return -EAGAIN;
		}
	} else {
		if (ilock_shared)
			inode_lock_shared(inode);
		else
			inode_lock(inode);
	}

	/* Fall back to buffered I/O if the inode does not support direct I/O. */
	if (!ext4_dio_supported(inode)) {
		if (ilock_shared)
			inode_unlock_shared(inode);
		else
			inode_unlock(inode);
		return ext4_buffered_write_iter(iocb, from);
	}

	ret = ext4_dio_write_checks(iocb, from, &ilock_shared, &extend);
	if (ret <= 0)
		return ret;

	offset = iocb->ki_pos;
	count = ret;

	/*
	 * Unaligned direct IOs must be serialized with respect to each other,
	 * as zeroing of partial blocks by two competing unaligned IOs can
	 * result in data corruption.
	 *
	 * So we make sure we don't allow any unaligned IO in flight.
	 * For IOs where we need not wait (like unaligned non-AIO DIO),
	 * the inode_dio_wait() below may anyway become a no-op, since we
	 * start with an exclusive lock.
	 */
	if (unaligned_io)
		inode_dio_wait(inode);

	if (extend) {
		handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
		if (IS_ERR(handle)) {
			ret = PTR_ERR(handle);
			goto out;
		}

		ret = ext4_orphan_add(handle, inode);
		if (ret) {
			ext4_journal_stop(handle);
			goto out;
		}

		ext4_journal_stop(handle);
	}

	if (ilock_shared)
		iomap_ops = &ext4_iomap_overwrite_ops;
	ret = iomap_dio_rw(iocb, from, iomap_ops, &ext4_dio_write_ops,
			   is_sync_kiocb(iocb) || unaligned_io || extend);

	if (extend)
		ret = ext4_handle_inode_extension(inode, offset, ret, count);

out:
	if (ilock_shared)
		inode_unlock_shared(inode);
	else
		inode_unlock(inode);

	if (ret >= 0 && iov_iter_count(from)) {
		ssize_t err;
		loff_t endbyte;

		offset = iocb->ki_pos;
		err = ext4_buffered_write_iter(iocb, from);
		if (err < 0)
			return err;

		/*
		 * We need to ensure that the pages within the page cache for
		 * the range covered by this I/O are written to disk and
		 * invalidated. This is an attempt to preserve the expected
		 * direct I/O semantics in the case where we fall back to
		 * buffered I/O in order to complete the remainder of the
		 * I/O request.
		 */
		ret += err;
		endbyte = offset + err - 1;
		err = filemap_write_and_wait_range(iocb->ki_filp->f_mapping,
						   offset, endbyte);
		if (!err)
			invalidate_mapping_pages(iocb->ki_filp->f_mapping,
						 offset >> PAGE_SHIFT,
						 endbyte >> PAGE_SHIFT);
	}

	return ret;
}

#ifdef CONFIG_FS_DAX
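/*
 * DAX write path. Always takes the exclusive inode lock; if the write
 * extends beyond i_disksize the inode is put on the orphan list around
 * the dax_iomap_rw() call, mirroring the direct I/O path above.
 */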
static ssize_t
ext4_dax_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	ssize_t ret;
	size_t count;
	loff_t offset;
	handle_t *handle;
	bool extend = false;
	struct inode *inode = file_inode(iocb->ki_filp);

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (!inode_trylock(inode))
			return -EAGAIN;
	} else {
		inode_lock(inode);
	}

	ret = ext4_write_checks(iocb, from);
	if (ret <= 0)
		goto out;

	offset = iocb->ki_pos;
	count = iov_iter_count(from);

	if (offset + count > EXT4_I(inode)->i_disksize) {
		handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
		if (IS_ERR(handle)) {
			ret = PTR_ERR(handle);
			goto out;
		}

		ret = ext4_orphan_add(handle, inode);
		if (ret) {
			ext4_journal_stop(handle);
			goto out;
		}

		extend = true;
		ext4_journal_stop(handle);
	}

	ret = dax_iomap_rw(iocb, from, &ext4_iomap_ops);

	if (extend)
		ret = ext4_handle_inode_extension(inode, offset, ret, count);
out:
	inode_unlock(inode);
	if (ret > 0)
		ret = generic_write_sync(iocb, ret);
	return ret;
}
#endif

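/*
 * Top-level ->write_iter() dispatcher, mirroring ext4_file_read_iter():
 * DAX, then direct I/O, then buffered writes.
 */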
static ssize_t
ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);

	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
		return -EIO;

#ifdef CONFIG_FS_DAX
	if (IS_DAX(inode))
		return ext4_dax_write_iter(iocb, from);
#endif
	if (iocb->ki_flags & IOCB_DIRECT)
		return ext4_dio_write_iter(iocb, from);

	return ext4_buffered_write_iter(iocb, from);
}

#ifdef CONFIG_FS_DAX
static vm_fault_t ext4_dax_huge_fault(struct vm_fault *vmf,
		enum page_entry_size pe_size)
{
	int error = 0;
	vm_fault_t result;
	int retries = 0;
	handle_t *handle = NULL;
	struct inode *inode = file_inode(vmf->vma->vm_file);
	struct super_block *sb = inode->i_sb;

	/*
	 * We have to distinguish real writes from writes which will result in a
	 * COW page; COW writes should *not* poke the journal (the file will not
	 * be changed). Doing so would cause unintended failures when mounted
	 * read-only.
	 *
	 * We check for VM_SHARED rather than vmf->cow_page since the latter is
	 * unset for pe_size != PE_SIZE_PTE (i.e. only in do_cow_fault); for
	 * other sizes, dax_iomap_fault will handle splitting / fallback so that
	 * we eventually come back with a COW page.
	 */
	bool write = (vmf->flags & FAULT_FLAG_WRITE) &&
		(vmf->vma->vm_flags & VM_SHARED);
	pfn_t pfn;

	if (write) {
		sb_start_pagefault(sb);
		file_update_time(vmf->vma->vm_file);
		down_read(&EXT4_I(inode)->i_mmap_sem);
retry:
		handle = ext4_journal_start_sb(sb, EXT4_HT_WRITE_PAGE,
					       EXT4_DATA_TRANS_BLOCKS(sb));
		if (IS_ERR(handle)) {
			up_read(&EXT4_I(inode)->i_mmap_sem);
			sb_end_pagefault(sb);
			return VM_FAULT_SIGBUS;
		}
	} else {
		down_read(&EXT4_I(inode)->i_mmap_sem);
	}
	result = dax_iomap_fault(vmf, pe_size, &pfn, &error, &ext4_iomap_ops);
	if (write) {
		ext4_journal_stop(handle);

		if ((result & VM_FAULT_ERROR) && error == -ENOSPC &&
		    ext4_should_retry_alloc(sb, &retries))
			goto retry;
		/* Handling synchronous page fault? */
		if (result & VM_FAULT_NEEDDSYNC)
			result = dax_finish_sync_fault(vmf, pe_size, pfn);
		up_read(&EXT4_I(inode)->i_mmap_sem);
		sb_end_pagefault(sb);
	} else {
		up_read(&EXT4_I(inode)->i_mmap_sem);
	}

	return result;
}

static vm_fault_t ext4_dax_fault(struct vm_fault *vmf)
{
	return ext4_dax_huge_fault(vmf, PE_SIZE_PTE);
}

static const struct vm_operations_struct ext4_dax_vm_ops = {
	.fault		= ext4_dax_fault,
	.huge_fault	= ext4_dax_huge_fault,
	.page_mkwrite	= ext4_dax_fault,
	.pfn_mkwrite	= ext4_dax_fault,
};
#else
#define ext4_dax_vm_ops	ext4_file_vm_ops
#endif

static const struct vm_operations_struct ext4_file_vm_ops = {
	.fault		= ext4_filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= ext4_page_mkwrite,
};

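/*
 * Choose the vm_operations for a new mapping: DAX inodes get the DAX fault
 * handlers above (plus VM_HUGEPAGE), everything else the page-cache based
 * handlers.
 */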
static int ext4_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file->f_mapping->host;
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct dax_device *dax_dev = sbi->s_daxdev;

	if (unlikely(ext4_forced_shutdown(sbi)))
		return -EIO;

	/*
	 * We don't support synchronous mappings for non-DAX files and
	 * for DAX files if the underlying dax_device is not synchronous.
	 */
	if (!daxdev_mapping_supported(vma, dax_dev))
		return -EOPNOTSUPP;

	file_accessed(file);
	if (IS_DAX(file_inode(file))) {
		vma->vm_ops = &ext4_dax_vm_ops;
		vma->vm_flags |= VM_HUGEPAGE;
	} else {
		vma->vm_ops = &ext4_file_vm_ops;
	}
	return 0;
}

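/*
 * Record the mount point of this filesystem in the superblock's
 * s_last_mounted field. Done at most once per mount (guarded by
 * EXT4_MF_MNTDIR_SAMPLED) and skipped entirely on read-only mounts.
 */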
static int ext4_sample_last_mounted(struct super_block *sb,
				    struct vfsmount *mnt)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct path path;
	char buf[64], *cp;
	handle_t *handle;
	int err;

	if (likely(sbi->s_mount_flags & EXT4_MF_MNTDIR_SAMPLED))
		return 0;

	if (sb_rdonly(sb) || !sb_start_intwrite_trylock(sb))
		return 0;

	sbi->s_mount_flags |= EXT4_MF_MNTDIR_SAMPLED;
	/*
	 * Sample where the filesystem has been mounted and
	 * store it in the superblock for sysadmin convenience
	 * when trying to sort through large numbers of block
	 * devices or filesystem images.
	 */
	memset(buf, 0, sizeof(buf));
	path.mnt = mnt;
	path.dentry = mnt->mnt_root;
	cp = d_path(&path, buf, sizeof(buf));
	err = 0;
	if (IS_ERR(cp))
		goto out;

	handle = ext4_journal_start_sb(sb, EXT4_HT_MISC, 1);
	err = PTR_ERR(handle);
	if (IS_ERR(handle))
		goto out;
	BUFFER_TRACE(sbi->s_sbh, "get_write_access");
	err = ext4_journal_get_write_access(handle, sbi->s_sbh);
	if (err)
		goto out_journal;
	strlcpy(sbi->s_es->s_last_mounted, cp,
		sizeof(sbi->s_es->s_last_mounted));
	ext4_handle_dirty_super(handle, sb);
out_journal:
	ext4_journal_stop(handle);
out:
	sb_end_intwrite(sb);
	return err;
}

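/*
 * Called on every open. Samples the mount directory, runs the fscrypt
 * and fsverity open checks, and attaches the jbd2 inode for writers
 * before handing off to the quota code.
 */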
static int ext4_file_open(struct inode *inode, struct file *filp)
{
	int ret;

	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
		return -EIO;

	ret = ext4_sample_last_mounted(inode->i_sb, filp->f_path.mnt);
	if (ret)
		return ret;

	ret = fscrypt_file_open(inode, filp);
	if (ret)
		return ret;

	ret = fsverity_file_open(inode, filp);
	if (ret)
		return ret;

	/*
	 * Set up the jbd2_inode if we are opening the inode for
	 * writing and the journal is present
	 */
	if (filp->f_mode & FMODE_WRITE) {
		ret = ext4_inode_attach_jinode(inode);
		if (ret < 0)
			return ret;
	}

	filp->f_mode |= FMODE_NOWAIT;
	return dquot_file_open(inode, filp);
}

/*
 * ext4_llseek() handles both block-mapped and extent-mapped maxbytes values
 * by calling generic_file_llseek_size() with the appropriate maxbytes
 * value for each.
 */
loff_t ext4_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes;

	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
		maxbytes = EXT4_SB(inode->i_sb)->s_bitmap_maxbytes;
	else
		maxbytes = inode->i_sb->s_maxbytes;

	switch (whence) {
	default:
		return generic_file_llseek_size(file, offset, whence,
						maxbytes, i_size_read(inode));
	case SEEK_HOLE:
		inode_lock_shared(inode);
		offset = iomap_seek_hole(inode, offset,
					 &ext4_iomap_report_ops);
		inode_unlock_shared(inode);
		break;
	case SEEK_DATA:
		inode_lock_shared(inode);
		offset = iomap_seek_data(inode, offset,
					 &ext4_iomap_report_ops);
		inode_unlock_shared(inode);
		break;
	}

	if (offset < 0)
		return offset;
	return vfs_setpos(file, offset, maxbytes);
}

const struct file_operations ext4_file_operations = {
	.llseek		= ext4_llseek,
	.read_iter	= ext4_file_read_iter,
	.write_iter	= ext4_file_write_iter,
	.unlocked_ioctl = ext4_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= ext4_compat_ioctl,
#endif
	.mmap		= ext4_file_mmap,
	.mmap_supported_flags = MAP_SYNC,
	.open		= ext4_file_open,
	.release	= ext4_release_file,
	.fsync		= ext4_sync_file,
	.get_unmapped_area = thp_get_unmapped_area,
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
	.fallocate	= ext4_fallocate,
};

const struct inode_operations ext4_file_inode_operations = {
	.setattr	= ext4_setattr,
	.getattr	= ext4_file_getattr,
	.listxattr	= ext4_listxattr,
	.get_acl	= ext4_get_acl,
	.set_acl	= ext4_set_acl,
	.fiemap		= ext4_fiemap,
};