/*
 * linux/fs/ext4/file.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 * from
 *
 * linux/fs/minix/file.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 *
 * ext4 fs regular file handling primitives
 *
 * 64-bit file support on 64-bit platforms by Jakub Jelinek
 * (jj@sunsite.ms.mff.cuni.cz)
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/path.h>
#include <linux/dax.h>
#include <linux/quotaops.h>
#include <linux/pagevec.h>
#include <linux/uio.h>
#include "ext4.h"
#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"

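/*
 * DAX (direct access) I/O bypasses the page cache and operates on
 * persistent memory through the iomap infrastructure. The read path
 * below takes the inode lock shared so that operations which could
 * clear the DAX flag or truncate the file are excluded while it runs.
 */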
#ifdef CONFIG_FS_DAX
static ssize_t ext4_dax_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
        struct inode *inode = file_inode(iocb->ki_filp);
        ssize_t ret;

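        /*
         * IOCB_NOWAIT callers must not block on the inode lock, so report
         * -EAGAIN instead of sleeping and let the caller retry.
         */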
        if (!inode_trylock_shared(inode)) {
                if (iocb->ki_flags & IOCB_NOWAIT)
                        return -EAGAIN;
                inode_lock_shared(inode);
        }
        /*
         * Recheck under the inode lock - at this point we are sure it
         * cannot change anymore.
         */
        if (!IS_DAX(inode)) {
                inode_unlock_shared(inode);
                /* Fall back to buffered IO in case we cannot support DAX */
                return generic_file_read_iter(iocb, to);
        }
        ret = dax_iomap_rw(iocb, to, &ext4_iomap_ops);
        inode_unlock_shared(inode);

        file_accessed(iocb->ki_filp);
        return ret;
}
#endif

static ssize_t ext4_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
        if (unlikely(ext4_forced_shutdown(EXT4_SB(file_inode(iocb->ki_filp)->i_sb))))
                return -EIO;

        if (!iov_iter_count(to))
                return 0; /* skip atime */

#ifdef CONFIG_FS_DAX
        if (IS_DAX(file_inode(iocb->ki_filp)))
                return ext4_dax_read_iter(iocb, to);
#endif
        return generic_file_read_iter(iocb, to);
}

/*
 * Called when an inode is released. Note that this is different
 * from ext4_file_open: open gets called at every open, but release
 * gets called only when /all/ the files are closed.
 */
static int ext4_release_file(struct inode *inode, struct file *filp)
{
        if (ext4_test_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE)) {
                ext4_alloc_da_blocks(inode);
                ext4_clear_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE);
        }
        /* if we are the last writer on the inode, drop the block reservation */
        if ((filp->f_mode & FMODE_WRITE) &&
                        (atomic_read(&inode->i_writecount) == 1) &&
                        !EXT4_I(inode)->i_reserved_data_blocks)
        {
                down_write(&EXT4_I(inode)->i_data_sem);
                ext4_discard_preallocations(inode);
                up_write(&EXT4_I(inode)->i_data_sem);
        }
        if (is_dx(inode) && filp->private_data)
                ext4_htree_free_dir_info(filp->private_data);

        return 0;
}

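/*
 * Wait until all outstanding conversions of unwritten extents on this
 * inode have completed, i.e. until i_unwritten drops to zero.
 */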
static void ext4_unwritten_wait(struct inode *inode)
{
        wait_queue_head_t *wq = ext4_ioend_wq(inode);

        wait_event(*wq, (atomic_read(&EXT4_I(inode)->i_unwritten) == 0));
}

/*
 * This tests whether the IO in question is block-aligned or not.
 * Ext4 utilizes unwritten extents when hole-filling during direct IO, and they
 * are converted to written only after the IO is complete. Until they are
 * mapped, these blocks appear as holes, so dio_zero_block() will assume that
 * it needs to zero out portions of the start and/or end block. If 2 AIO
 * threads are at work on the same unwritten block, they must be synchronized
 * or one thread will zero the other's data, causing corruption.
 */
static int
ext4_unaligned_aio(struct inode *inode, struct iov_iter *from, loff_t pos)
{
        struct super_block *sb = inode->i_sb;
        int blockmask = sb->s_blocksize - 1;

        if (pos >= i_size_read(inode))
                return 0;

        if ((pos | iov_iter_alignment(from)) & blockmask)
                return 1;

        return 0;
}

/* Is IO overwriting allocated and initialized blocks? */
static bool ext4_overwrite_io(struct inode *inode, loff_t pos, loff_t len)
{
        struct ext4_map_blocks map;
        unsigned int blkbits = inode->i_blkbits;
        int err, blklen;

        if (pos + len > i_size_read(inode))
                return false;

        map.m_lblk = pos >> blkbits;
        map.m_len = EXT4_MAX_BLOCKS(len, pos, blkbits);
        blklen = map.m_len;

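        /*
         * A NULL handle with no mapping flags means "lookup only": this call
         * does not allocate or modify any blocks.
         */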
        err = ext4_map_blocks(NULL, inode, &map, 0);
        /*
         * 'err == blklen' means that all of the blocks have been
         * preallocated, regardless of whether they have been initialized
         * or not. To exclude unwritten extents, we need to check m_flags.
         */
        return err == blklen && (map.m_flags & EXT4_MAP_MAPPED);
}

static ssize_t ext4_write_checks(struct kiocb *iocb, struct iov_iter *from)
{
        struct inode *inode = file_inode(iocb->ki_filp);
        ssize_t ret;

        ret = generic_write_checks(iocb, from);
        if (ret <= 0)
                return ret;
        /*
         * If we have encountered a bitmap-format file, the size limit
         * is smaller than s_maxbytes, which is for extent-mapped files.
         */
        if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
                struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);

                if (iocb->ki_pos >= sbi->s_bitmap_maxbytes)
                        return -EFBIG;
                iov_iter_truncate(from, sbi->s_bitmap_maxbytes - iocb->ki_pos);
        }
        return iov_iter_count(from);
}

#ifdef CONFIG_FS_DAX
static ssize_t
ext4_dax_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
        struct inode *inode = file_inode(iocb->ki_filp);
        ssize_t ret;

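        /*
         * The exclusive inode lock covers the size and permission checks
         * below as well as the DAX I/O itself, so racing writes are fully
         * serialized against each other.
         */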
        if (!inode_trylock(inode)) {
                if (iocb->ki_flags & IOCB_NOWAIT)
                        return -EAGAIN;
                inode_lock(inode);
        }
        ret = ext4_write_checks(iocb, from);
        if (ret <= 0)
                goto out;
        ret = file_remove_privs(iocb->ki_filp);
        if (ret)
                goto out;
        ret = file_update_time(iocb->ki_filp);
        if (ret)
                goto out;

        ret = dax_iomap_rw(iocb, from, &ext4_iomap_ops);
out:
        inode_unlock(inode);
        if (ret > 0)
                ret = generic_write_sync(iocb, ret);
        return ret;
}
#endif

static ssize_t
ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
        struct inode *inode = file_inode(iocb->ki_filp);
        int o_direct = iocb->ki_flags & IOCB_DIRECT;
        int unaligned_aio = 0;
        int overwrite = 0;
        ssize_t ret;

        if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
                return -EIO;

#ifdef CONFIG_FS_DAX
        if (IS_DAX(inode))
                return ext4_dax_write_iter(iocb, from);
#endif

        if (!inode_trylock(inode)) {
                if (iocb->ki_flags & IOCB_NOWAIT)
                        return -EAGAIN;
                inode_lock(inode);
        }

        ret = ext4_write_checks(iocb, from);
        if (ret <= 0)
                goto out;

        /*
         * Unaligned direct AIO must be serialized with each other as zeroing
         * of partial blocks of two competing unaligned AIOs can result in
         * data corruption.
         */
        if (o_direct && ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS) &&
            !is_sync_kiocb(iocb) &&
            ext4_unaligned_aio(inode, from, iocb->ki_pos)) {
                unaligned_aio = 1;
                ext4_unwritten_wait(inode);
        }

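        /*
         * A pure overwrite of allocated, initialized blocks cannot change
         * the file size or the extent tree, which lets the direct IO path
         * relax its locking; the flag is passed down via iocb->private.
         */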
        iocb->private = &overwrite;
        /* Check whether we do a DIO overwrite or not */
        if (o_direct && !unaligned_aio) {
                if (ext4_overwrite_io(inode, iocb->ki_pos, iov_iter_count(from))) {
                        if (ext4_should_dioread_nolock(inode))
                                overwrite = 1;
                } else if (iocb->ki_flags & IOCB_NOWAIT) {
                        ret = -EAGAIN;
                        goto out;
                }
        }

        ret = __generic_file_write_iter(iocb, from);
        inode_unlock(inode);

        if (ret > 0)
                ret = generic_write_sync(iocb, ret);

        return ret;

out:
        inode_unlock(inode);
        return ret;
}

#ifdef CONFIG_FS_DAX
static int ext4_dax_huge_fault(struct vm_fault *vmf,
                enum page_entry_size pe_size)
{
        int result;
        handle_t *handle = NULL;
        struct inode *inode = file_inode(vmf->vma->vm_file);
        struct super_block *sb = inode->i_sb;

        /*
         * We have to distinguish real writes from writes which will result in a
         * COW page; COW writes should *not* poke the journal (the file will not
         * be changed). Doing so would cause unintended failures when mounted
         * read-only.
         *
         * We check for VM_SHARED rather than vmf->cow_page since the latter is
         * unset for pe_size != PE_SIZE_PTE (i.e. only in do_cow_fault); for
         * other sizes, dax_iomap_fault will handle splitting / fallback so that
         * we eventually come back with a COW page.
         */
        bool write = (vmf->flags & FAULT_FLAG_WRITE) &&
                (vmf->vma->vm_flags & VM_SHARED);

        if (write) {
                sb_start_pagefault(sb);
                file_update_time(vmf->vma->vm_file);
                down_read(&EXT4_I(inode)->i_mmap_sem);
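                /*
                 * Start a transaction up front: the fault may allocate
                 * blocks, and EXT4_DATA_TRANS_BLOCKS is the worst-case
                 * credit estimate for a data allocation.
                 */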
                handle = ext4_journal_start_sb(sb, EXT4_HT_WRITE_PAGE,
                                               EXT4_DATA_TRANS_BLOCKS(sb));
        } else {
                down_read(&EXT4_I(inode)->i_mmap_sem);
        }
        if (!IS_ERR(handle))
                result = dax_iomap_fault(vmf, pe_size, &ext4_iomap_ops);
        else
                result = VM_FAULT_SIGBUS;
        if (write) {
                if (!IS_ERR(handle))
                        ext4_journal_stop(handle);
                up_read(&EXT4_I(inode)->i_mmap_sem);
                sb_end_pagefault(sb);
        } else {
                up_read(&EXT4_I(inode)->i_mmap_sem);
        }

        return result;
}

static int ext4_dax_fault(struct vm_fault *vmf)
{
        return ext4_dax_huge_fault(vmf, PE_SIZE_PTE);
}

/*
 * Handle write faults for VM_MIXEDMAP mappings. Similarly to the
 * ext4_dax_fault() handler, we check for races against truncate. Note that
 * since we cycle through i_mmap_sem, we are sure that any hole punching that
 * began before we were called has finished by now, and so if it included part
 * of the file we are working on, our pte will get unmapped and the check for
 * pte_same() in wp_pfn_shared() fails. Thus the fault gets retried and things
 * work out as desired.
 */
static int ext4_dax_pfn_mkwrite(struct vm_fault *vmf)
{
        struct inode *inode = file_inode(vmf->vma->vm_file);
        struct super_block *sb = inode->i_sb;
        loff_t size;
        int ret;

        sb_start_pagefault(sb);
        file_update_time(vmf->vma->vm_file);
        down_read(&EXT4_I(inode)->i_mmap_sem);
        size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
        if (vmf->pgoff >= size)
                ret = VM_FAULT_SIGBUS;
        else
                ret = dax_pfn_mkwrite(vmf);
        up_read(&EXT4_I(inode)->i_mmap_sem);
        sb_end_pagefault(sb);

        return ret;
}

static const struct vm_operations_struct ext4_dax_vm_ops = {
        .fault          = ext4_dax_fault,
        .huge_fault     = ext4_dax_huge_fault,
        .page_mkwrite   = ext4_dax_fault,
        .pfn_mkwrite    = ext4_dax_pfn_mkwrite,
};
#else
#define ext4_dax_vm_ops ext4_file_vm_ops
#endif

static const struct vm_operations_struct ext4_file_vm_ops = {
        .fault          = ext4_filemap_fault,
        .map_pages      = filemap_map_pages,
        .page_mkwrite   = ext4_page_mkwrite,
};

static int ext4_file_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct inode *inode = file->f_mapping->host;

        if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
                return -EIO;

        file_accessed(file);
        if (IS_DAX(file_inode(file))) {
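                /*
                 * DAX mappings mix struct pages and raw pfns (VM_MIXEDMAP)
                 * and may be mapped with huge pages (VM_HUGEPAGE).
                 */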
                vma->vm_ops = &ext4_dax_vm_ops;
                vma->vm_flags |= VM_MIXEDMAP | VM_HUGEPAGE;
        } else {
                vma->vm_ops = &ext4_file_vm_ops;
        }
        return 0;
}

static int ext4_file_open(struct inode *inode, struct file *filp)
{
        struct super_block *sb = inode->i_sb;
        struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
        struct vfsmount *mnt = filp->f_path.mnt;
        struct dentry *dir;
        struct path path;
        char buf[64], *cp;
        int ret;

        if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
                return -EIO;

        if (unlikely(!(sbi->s_mount_flags & EXT4_MF_MNTDIR_SAMPLED) &&
                     !(sb->s_flags & MS_RDONLY))) {
                sbi->s_mount_flags |= EXT4_MF_MNTDIR_SAMPLED;
                /*
                 * Sample where the filesystem has been mounted and
                 * store it in the superblock for sysadmin convenience
                 * when trying to sort through large numbers of block
                 * devices or filesystem images.
                 */
                memset(buf, 0, sizeof(buf));
                path.mnt = mnt;
                path.dentry = mnt->mnt_root;
                cp = d_path(&path, buf, sizeof(buf));
                if (!IS_ERR(cp)) {
                        handle_t *handle;
                        int err;

                        handle = ext4_journal_start_sb(sb, EXT4_HT_MISC, 1);
                        if (IS_ERR(handle))
                                return PTR_ERR(handle);
                        BUFFER_TRACE(sbi->s_sbh, "get_write_access");
                        err = ext4_journal_get_write_access(handle, sbi->s_sbh);
                        if (err) {
                                ext4_journal_stop(handle);
                                return err;
                        }
                        strlcpy(sbi->s_es->s_last_mounted, cp,
                                sizeof(sbi->s_es->s_last_mounted));
                        ext4_handle_dirty_super(handle, sb);
                        ext4_journal_stop(handle);
                }
        }
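        /* Encrypted files may only be opened once their key is available. */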
        if (ext4_encrypted_inode(inode)) {
                ret = fscrypt_get_encryption_info(inode);
                if (ret)
                        return -EACCES;
                if (!fscrypt_has_encryption_key(inode))
                        return -ENOKEY;
        }

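        /*
         * Refuse to open an encrypted file whose encryption context is not
         * consistent with that of its parent directory.
         */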
        dir = dget_parent(file_dentry(filp));
        if (ext4_encrypted_inode(d_inode(dir)) &&
            !fscrypt_has_permitted_context(d_inode(dir), inode)) {
                ext4_warning(inode->i_sb,
                             "Inconsistent encryption contexts: %lu/%lu",
                             (unsigned long) d_inode(dir)->i_ino,
                             (unsigned long) inode->i_ino);
                dput(dir);
                return -EPERM;
        }
        dput(dir);
        /*
         * Set up the jbd2_inode if we are opening the inode for
         * writing and the journal is present
         */
        if (filp->f_mode & FMODE_WRITE) {
                ret = ext4_inode_attach_jinode(inode);
                if (ret < 0)
                        return ret;
        }

        /* Set the flags to support nowait AIO */
        filp->f_mode |= FMODE_AIO_NOWAIT;

        return dquot_file_open(inode, filp);
}

/*
 * Here we use ext4_map_blocks() to get a block mapping for an extent-based
 * file rather than ext4_ext_walk_space() because we can introduce
 * SEEK_DATA/SEEK_HOLE for block-mapped and extent-mapped files in the same
 * function. When the extent status tree has been fully implemented, it will
 * track all extent status for a file and we can directly use it to
 * retrieve the offset for SEEK_DATA/SEEK_HOLE.
 */

/*
 * When we retrieve the offset for SEEK_DATA/SEEK_HOLE, we need to look up
 * the page cache to check whether there is any data between
 * [startoff, endoff] because, if this range contains an unwritten extent,
 * we treat this extent as data or as a hole according to whether the
 * page cache has data or not.
 */
static int ext4_find_unwritten_pgoff(struct inode *inode,
                                     int whence,
                                     ext4_lblk_t end_blk,
                                     loff_t *offset)
{
        struct pagevec pvec;
        unsigned int blkbits;
        pgoff_t index;
        pgoff_t end;
        loff_t endoff;
        loff_t startoff;
        loff_t lastoff;
        int found = 0;

        blkbits = inode->i_sb->s_blocksize_bits;
        startoff = *offset;
        lastoff = startoff;
        endoff = (loff_t)end_blk << blkbits;

        index = startoff >> PAGE_SHIFT;
        end = (endoff - 1) >> PAGE_SHIFT;

        pagevec_init(&pvec, 0);
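        /*
         * Walk the page cache in pagevec-sized batches. Within each page,
         * an uptodate or unwritten buffer head counts as data and anything
         * else as a hole.
         */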
        do {
                int i, num;
                unsigned long nr_pages;

                num = min_t(pgoff_t, end - index, PAGEVEC_SIZE - 1) + 1;
                nr_pages = pagevec_lookup(&pvec, inode->i_mapping, index,
                                          (pgoff_t)num);
                if (nr_pages == 0)
                        break;

                for (i = 0; i < nr_pages; i++) {
                        struct page *page = pvec.pages[i];
                        struct buffer_head *bh, *head;

                        /*
                         * If current offset is smaller than the page offset,
                         * there is a hole at this offset.
                         */
                        if (whence == SEEK_HOLE && lastoff < endoff &&
                            lastoff < page_offset(pvec.pages[i])) {
                                found = 1;
                                *offset = lastoff;
                                goto out;
                        }

                        if (page->index > end)
                                goto out;

                        lock_page(page);

                        if (unlikely(page->mapping != inode->i_mapping)) {
                                unlock_page(page);
                                continue;
                        }

                        if (!page_has_buffers(page)) {
                                unlock_page(page);
                                continue;
                        }

                        lastoff = page_offset(page);
                        bh = head = page_buffers(page);
                        do {
                                if (lastoff + bh->b_size <= startoff)
                                        goto next;
                                if (buffer_uptodate(bh) ||
                                    buffer_unwritten(bh)) {
                                        if (whence == SEEK_DATA)
                                                found = 1;
                                } else {
                                        if (whence == SEEK_HOLE)
                                                found = 1;
                                }
                                if (found) {
                                        *offset = max_t(loff_t,
                                                        startoff, lastoff);
                                        unlock_page(page);
                                        goto out;
                                }
next:
                                lastoff += bh->b_size;
                                bh = bh->b_this_page;
                        } while (bh != head);

                        lastoff = page_offset(page) + PAGE_SIZE;
                        unlock_page(page);
                }

                /* If fewer pages were found than requested, we are done. */
                if (nr_pages < num)
                        break;

                index = pvec.pages[i - 1]->index + 1;
                pagevec_release(&pvec);
        } while (index <= end);

        if (whence == SEEK_HOLE && lastoff < endoff) {
                found = 1;
                *offset = lastoff;
        }
out:
        pagevec_release(&pvec);
        return found;
}

/*
 * ext4_seek_data() retrieves the offset for SEEK_DATA.
 */
static loff_t ext4_seek_data(struct file *file, loff_t offset, loff_t maxsize)
{
        struct inode *inode = file->f_mapping->host;
        struct extent_status es;
        ext4_lblk_t start, last, end;
        loff_t dataoff, isize;
        int blkbits;
        int ret;

        inode_lock(inode);

        isize = i_size_read(inode);
        if (offset < 0 || offset >= isize) {
                inode_unlock(inode);
                return -ENXIO;
        }

        blkbits = inode->i_sb->s_blocksize_bits;
        start = offset >> blkbits;
        last = start;
        end = isize >> blkbits;
        dataoff = offset;

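        /*
         * Walk the extent status from the starting block: a written extent
         * is data immediately, while an unwritten extent counts as data
         * only if the page cache holds data for it.
         */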
        do {
                ret = ext4_get_next_extent(inode, last, end - last + 1, &es);
                if (ret <= 0) {
                        /* No extent found -> no data */
                        if (ret == 0)
                                ret = -ENXIO;
                        inode_unlock(inode);
                        return ret;
                }

                last = es.es_lblk;
                if (last != start)
                        dataoff = (loff_t)last << blkbits;
                if (!ext4_es_is_unwritten(&es))
                        break;

                /*
                 * If there is an unwritten extent at this offset, it is
                 * treated as data or as a hole according to whether the
                 * page cache has data for it or not.
                 */
                if (ext4_find_unwritten_pgoff(inode, SEEK_DATA,
                                              es.es_lblk + es.es_len, &dataoff))
                        break;
                last += es.es_len;
                dataoff = (loff_t)last << blkbits;
                cond_resched();
        } while (last <= end);

        inode_unlock(inode);

        if (dataoff > isize)
                return -ENXIO;

        return vfs_setpos(file, dataoff, maxsize);
}

/*
 * ext4_seek_hole() retrieves the offset for SEEK_HOLE.
 */
static loff_t ext4_seek_hole(struct file *file, loff_t offset, loff_t maxsize)
{
        struct inode *inode = file->f_mapping->host;
        struct extent_status es;
        ext4_lblk_t start, last, end;
        loff_t holeoff, isize;
        int blkbits;
        int ret;

        inode_lock(inode);

        isize = i_size_read(inode);
        if (offset < 0 || offset >= isize) {
                inode_unlock(inode);
                return -ENXIO;
        }

        blkbits = inode->i_sb->s_blocksize_bits;
        start = offset >> blkbits;
        last = start;
        end = isize >> blkbits;
        holeoff = offset;

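        /*
         * Walk the extent status from the starting block: the first gap in
         * the extent tree, or an unwritten extent with no data in the page
         * cache, is the hole.
         */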
        do {
                ret = ext4_get_next_extent(inode, last, end - last + 1, &es);
                if (ret < 0) {
                        inode_unlock(inode);
                        return ret;
                }
                /* Found a hole? */
                if (ret == 0 || es.es_lblk > last) {
                        if (last != start)
                                holeoff = (loff_t)last << blkbits;
                        break;
                }
                /*
                 * If there is an unwritten extent at this offset, it is
                 * treated as data or as a hole according to whether the
                 * page cache has data for it or not.
                 */
                if (ext4_es_is_unwritten(&es) &&
                    ext4_find_unwritten_pgoff(inode, SEEK_HOLE,
                                              last + es.es_len, &holeoff))
                        break;

                last += es.es_len;
                holeoff = (loff_t)last << blkbits;
                cond_resched();
        } while (last <= end);

        inode_unlock(inode);

        if (holeoff > isize)
                holeoff = isize;

        return vfs_setpos(file, holeoff, maxsize);
}

/*
 * ext4_llseek() handles both block-mapped and extent-mapped maxbytes values
 * by calling generic_file_llseek_size() with the appropriate maxbytes
 * value for each.
 */
loff_t ext4_llseek(struct file *file, loff_t offset, int whence)
{
        struct inode *inode = file->f_mapping->host;
        loff_t maxbytes;

        if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
                maxbytes = EXT4_SB(inode->i_sb)->s_bitmap_maxbytes;
        else
                maxbytes = inode->i_sb->s_maxbytes;

        switch (whence) {
        case SEEK_SET:
        case SEEK_CUR:
        case SEEK_END:
                return generic_file_llseek_size(file, offset, whence,
                                                maxbytes, i_size_read(inode));
        case SEEK_DATA:
                return ext4_seek_data(file, offset, maxbytes);
        case SEEK_HOLE:
                return ext4_seek_hole(file, offset, maxbytes);
        }

        return -EINVAL;
}

const struct file_operations ext4_file_operations = {
        .llseek         = ext4_llseek,
        .read_iter      = ext4_file_read_iter,
        .write_iter     = ext4_file_write_iter,
        .unlocked_ioctl = ext4_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl   = ext4_compat_ioctl,
#endif
        .mmap           = ext4_file_mmap,
        .open           = ext4_file_open,
        .release        = ext4_release_file,
        .fsync          = ext4_sync_file,
        .get_unmapped_area = thp_get_unmapped_area,
        .splice_read    = generic_file_splice_read,
        .splice_write   = iter_file_splice_write,
        .fallocate      = ext4_fallocate,
};

const struct inode_operations ext4_file_inode_operations = {
        .setattr        = ext4_setattr,
        .getattr        = ext4_file_getattr,
        .listxattr      = ext4_listxattr,
        .get_acl        = ext4_get_acl,
        .set_acl        = ext4_set_acl,
        .fiemap         = ext4_fiemap,
};