// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/ext4/inode.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 * from
 *
 *  linux/fs/minix/inode.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  64-bit file support on 64-bit platforms by Jakub Jelinek
 *	(jj@sunsite.ms.mff.cuni.cz)
 *
 *  Assorted race fixes, rewrite of ext4_get_block() by Al Viro, 2000
 */

#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/time.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/dax.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/namei.h>
#include <linux/uio.h>
#include <linux/bio.h>
#include <linux/workqueue.h>
#include <linux/kernel.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/iomap.h>
#include <linux/iversion.h>

#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"
#include "truncate.h"

#include <trace/events/ext4.h>

static __u32 ext4_inode_csum(struct inode *inode, struct ext4_inode *raw,
			     struct ext4_inode_info *ei)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	__u32 csum;
	__u16 dummy_csum = 0;
	int offset = offsetof(struct ext4_inode, i_checksum_lo);
	unsigned int csum_size = sizeof(dummy_csum);

	csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)raw, offset);
	csum = ext4_chksum(sbi, csum, (__u8 *)&dummy_csum, csum_size);
	offset += csum_size;
	csum = ext4_chksum(sbi, csum, (__u8 *)raw + offset,
			   EXT4_GOOD_OLD_INODE_SIZE - offset);

	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
		offset = offsetof(struct ext4_inode, i_checksum_hi);
		csum = ext4_chksum(sbi, csum, (__u8 *)raw +
				   EXT4_GOOD_OLD_INODE_SIZE,
				   offset - EXT4_GOOD_OLD_INODE_SIZE);
		if (EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi)) {
			csum = ext4_chksum(sbi, csum, (__u8 *)&dummy_csum,
					   csum_size);
			offset += csum_size;
		}
		csum = ext4_chksum(sbi, csum, (__u8 *)raw + offset,
				   EXT4_INODE_SIZE(inode->i_sb) - offset);
	}

	return csum;
}

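/*
 * Editor's sketch (illustrative, not upstream text): for a 256-byte inode
 * where i_checksum_hi fits, the calls above checksum the byte ranges
 *
 *	[0, i_checksum_lo) + 2 zero bytes + [i_checksum_lo + 2, 128)
 *	+ [128, i_checksum_hi) + 2 zero bytes + [i_checksum_hi + 2, 256)
 *
 * (128 being EXT4_GOOD_OLD_INODE_SIZE), i.e. the whole raw inode with both
 * checksum fields treated as zero, seeded with ei->i_csum_seed.
 */
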
static int ext4_inode_csum_verify(struct inode *inode, struct ext4_inode *raw,
				  struct ext4_inode_info *ei)
{
	__u32 provided, calculated;

	if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
	    cpu_to_le32(EXT4_OS_LINUX) ||
	    !ext4_has_metadata_csum(inode->i_sb))
		return 1;

	provided = le16_to_cpu(raw->i_checksum_lo);
	calculated = ext4_inode_csum(inode, raw, ei);
	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
	    EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi))
		provided |= ((__u32)le16_to_cpu(raw->i_checksum_hi)) << 16;
	else
		calculated &= 0xFFFF;

	return provided == calculated;
}

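/*
 * Editor's note (illustrative arithmetic): if ext4_inode_csum() yields
 * 0xAABBCCDD and the inode carries i_checksum_hi, the stored halves are
 * lo = 0xCCDD and hi = 0xAABB, reassembled above as lo | (hi << 16); on a
 * 128-byte inode only the low half 0xCCDD is stored and compared.
 */
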
void ext4_inode_csum_set(struct inode *inode, struct ext4_inode *raw,
			 struct ext4_inode_info *ei)
{
	__u32 csum;

	if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
	    cpu_to_le32(EXT4_OS_LINUX) ||
	    !ext4_has_metadata_csum(inode->i_sb))
		return;

	csum = ext4_inode_csum(inode, raw, ei);
	raw->i_checksum_lo = cpu_to_le16(csum & 0xFFFF);
	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
	    EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi))
		raw->i_checksum_hi = cpu_to_le16(csum >> 16);
}

static inline int ext4_begin_ordered_truncate(struct inode *inode,
					      loff_t new_size)
{
	trace_ext4_begin_ordered_truncate(inode, new_size);
	/*
	 * If jinode is zero, then we never opened the file for
	 * writing, so there's no need to call
	 * jbd2_journal_begin_ordered_truncate() since there are no
	 * outstanding writes we need to flush.
	 */
	if (!EXT4_I(inode)->jinode)
		return 0;
	return jbd2_journal_begin_ordered_truncate(EXT4_JOURNAL(inode),
						   EXT4_I(inode)->jinode,
						   new_size);
}

static void ext4_invalidatepage(struct page *page, unsigned int offset,
				unsigned int length);
static int __ext4_journalled_writepage(struct page *page, unsigned int len);
static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh);
static int ext4_meta_trans_blocks(struct inode *inode, int lblocks,
				  int pextents);

/*
 * Test whether an inode is a fast symlink.
 * A fast symlink has its symlink data stored in ext4_inode_info->i_data.
 */
int ext4_inode_is_fast_symlink(struct inode *inode)
{
	if (!(EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL)) {
		int ea_blocks = EXT4_I(inode)->i_file_acl ?
			EXT4_CLUSTER_SIZE(inode->i_sb) >> 9 : 0;

		if (ext4_has_inline_data(inode))
			return 0;

		return (S_ISLNK(inode->i_mode) && inode->i_blocks - ea_blocks == 0);
	}
	return S_ISLNK(inode->i_mode) && inode->i_size &&
	       (inode->i_size < EXT4_N_BLOCKS * 4);
}

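/*
 * Editor's note (illustrative): with EXT4_N_BLOCKS == 15, the size test
 * above means a fast symlink target must be shorter than 15 * 4 = 60 bytes,
 * the size of the i_data array of block pointers it is stored in.
 */
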
/*
 * Called at the last iput() if i_nlink is zero.
 */
void ext4_evict_inode(struct inode *inode)
{
	handle_t *handle;
	int err;
	/*
	 * Credits for final inode cleanup and freeing:
	 * sb + inode (ext4_orphan_del()), block bitmap, group descriptor
	 * (xattr block freeing), bitmap, group descriptor (inode freeing)
	 */
	int extra_credits = 6;
	struct ext4_xattr_inode_array *ea_inode_array = NULL;
	bool freeze_protected = false;

	trace_ext4_evict_inode(inode);

	if (inode->i_nlink) {
		/*
		 * When journalling data, dirty buffers are tracked only in
		 * the journal. So although mm thinks everything is clean and
		 * ready for reaping, the inode might still have some pages to
		 * write in the running transaction or waiting to be
		 * checkpointed. Thus calling jbd2_journal_invalidatepage()
		 * (via truncate_inode_pages()) to discard these buffers can
		 * cause data loss. Also even if we did not discard these
		 * buffers, we would have no way to find them after the inode
		 * is reaped and thus the user could see stale data if they
		 * try to read them before the transaction is checkpointed.
		 * So be careful and force everything to disk here... We use
		 * ei->i_datasync_tid to store the newest transaction
		 * containing inode's data.
		 *
		 * Note that directories do not have this problem because they
		 * don't use the page cache.
		 */
		if (inode->i_ino != EXT4_JOURNAL_INO &&
		    ext4_should_journal_data(inode) &&
		    (S_ISLNK(inode->i_mode) || S_ISREG(inode->i_mode)) &&
		    inode->i_data.nrpages) {
			journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
			tid_t commit_tid = EXT4_I(inode)->i_datasync_tid;

			jbd2_complete_transaction(journal, commit_tid);
			filemap_write_and_wait(&inode->i_data);
		}
		truncate_inode_pages_final(&inode->i_data);

		goto no_delete;
	}

	if (is_bad_inode(inode))
		goto no_delete;
	dquot_initialize(inode);

	if (ext4_should_order_data(inode))
		ext4_begin_ordered_truncate(inode, 0);
	truncate_inode_pages_final(&inode->i_data);

	/*
	 * For inodes with journalled data, transaction commit could have
	 * dirtied the inode. The flush worker ignores it because of the
	 * I_FREEING flag, but we still need to remove the inode from the
	 * writeback lists.
	 */
	if (!list_empty_careful(&inode->i_io_list)) {
		WARN_ON_ONCE(!ext4_should_journal_data(inode));
		inode_io_list_del(inode);
	}

	/*
	 * Protect us against freezing - the iput() caller didn't have to have
	 * any protection against it. When we are in a running transaction
	 * though, we are already protected against freezing and we cannot
	 * grab further protection due to lock ordering constraints.
	 */
	if (!ext4_journal_current_handle()) {
		sb_start_intwrite(inode->i_sb);
		freeze_protected = true;
	}

	if (!IS_NOQUOTA(inode))
		extra_credits += EXT4_MAXQUOTAS_DEL_BLOCKS(inode->i_sb);

	/*
	 * Block bitmap, group descriptor, and inode are accounted in both
	 * ext4_blocks_for_truncate() and extra_credits. So subtract 3.
	 */
	handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE,
			ext4_blocks_for_truncate(inode) + extra_credits - 3);
	if (IS_ERR(handle)) {
		ext4_std_error(inode->i_sb, PTR_ERR(handle));
		/*
		 * If we're going to skip the normal cleanup, we still need to
		 * make sure that the in-core orphan linked list is properly
		 * cleaned up.
		 */
		ext4_orphan_del(NULL, inode);
		if (freeze_protected)
			sb_end_intwrite(inode->i_sb);
		goto no_delete;
	}

	if (IS_SYNC(inode))
		ext4_handle_sync(handle);

	/*
	 * Set inode->i_size to 0 before calling ext4_truncate(). We need
	 * special handling of symlinks here because i_size is used to
	 * determine whether ext4_inode_info->i_data contains symlink data or
	 * block mappings. Setting i_size to 0 will remove its fast symlink
	 * status. Erase i_data so that it becomes a valid empty block map.
	 */
	if (ext4_inode_is_fast_symlink(inode))
		memset(EXT4_I(inode)->i_data, 0, sizeof(EXT4_I(inode)->i_data));
	inode->i_size = 0;
	err = ext4_mark_inode_dirty(handle, inode);
	if (err) {
		ext4_warning(inode->i_sb,
			     "couldn't mark inode dirty (err %d)", err);
		goto stop_handle;
	}
	if (inode->i_blocks) {
		err = ext4_truncate(inode);
		if (err) {
			ext4_error_err(inode->i_sb, -err,
				       "couldn't truncate inode %lu (err %d)",
				       inode->i_ino, err);
			goto stop_handle;
		}
	}

	/* Remove xattr references. */
	err = ext4_xattr_delete_inode(handle, inode, &ea_inode_array,
				      extra_credits);
	if (err) {
		ext4_warning(inode->i_sb, "xattr delete (err %d)", err);
stop_handle:
		ext4_journal_stop(handle);
		ext4_orphan_del(NULL, inode);
		if (freeze_protected)
			sb_end_intwrite(inode->i_sb);
		ext4_xattr_inode_array_free(ea_inode_array);
		goto no_delete;
	}

	/*
	 * Kill off the orphan record which ext4_truncate created.
	 * AKPM: I think this can be inside the above `if'.
	 * Note that ext4_orphan_del() has to be able to cope with the
	 * deletion of a non-existent orphan - this is because we don't
	 * know if ext4_truncate() actually created an orphan record.
	 * (Well, we could do this if we need to, but heck - it works)
	 */
	ext4_orphan_del(handle, inode);
	EXT4_I(inode)->i_dtime = (__u32)ktime_get_real_seconds();

	/*
	 * One subtle ordering requirement: if anything has gone wrong
	 * (transaction abort, IO errors, whatever), then we can still
	 * do these next steps (the fs will already have been marked as
	 * having errors), but we can't free the inode if the mark_dirty
	 * fails.
	 */
	if (ext4_mark_inode_dirty(handle, inode))
		/* If that failed, just do the required in-core inode clear. */
		ext4_clear_inode(inode);
	else
		ext4_free_inode(handle, inode);
	ext4_journal_stop(handle);
	if (freeze_protected)
		sb_end_intwrite(inode->i_sb);
	ext4_xattr_inode_array_free(ea_inode_array);
	return;
no_delete:
	if (!list_empty(&EXT4_I(inode)->i_fc_list))
		ext4_fc_mark_ineligible(inode->i_sb, EXT4_FC_REASON_NOMEM);
	ext4_clear_inode(inode);	/* We must guarantee clearing of inode... */
}

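/*
 * Editor's sketch of the credit arithmetic above (illustrative): with no
 * quota, the handle is started with ext4_blocks_for_truncate(inode) + 6 - 3
 * credits; the "- 3" drops the block bitmap, group descriptor and inode
 * buffers that extra_credits and ext4_blocks_for_truncate() would otherwise
 * both count.
 */
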
#ifdef CONFIG_QUOTA
qsize_t *ext4_get_reserved_space(struct inode *inode)
{
	return &EXT4_I(inode)->i_reserved_quota;
}
#endif

/*
 * Called with i_data_sem down, which is important since we can call
 * ext4_discard_preallocations() from here.
 */
void ext4_da_update_reserve_space(struct inode *inode,
				  int used, int quota_claim)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct ext4_inode_info *ei = EXT4_I(inode);

	spin_lock(&ei->i_block_reservation_lock);
	trace_ext4_da_update_reserve_space(inode, used, quota_claim);
	if (unlikely(used > ei->i_reserved_data_blocks)) {
		ext4_warning(inode->i_sb, "%s: ino %lu, used %d "
			     "with only %d reserved data blocks",
			     __func__, inode->i_ino, used,
			     ei->i_reserved_data_blocks);
		WARN_ON(1);
		used = ei->i_reserved_data_blocks;
	}

	/* Update per-inode reservations */
	ei->i_reserved_data_blocks -= used;
	percpu_counter_sub(&sbi->s_dirtyclusters_counter, used);

	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);

	/* Update quota subsystem for data blocks */
	if (quota_claim)
		dquot_claim_block(inode, EXT4_C2B(sbi, used));
	else {
		/*
		 * We did fallocate with an offset that is already delayed
		 * allocated. So on delayed allocated writeback we should
		 * not re-claim the quota for fallocated blocks.
		 */
		dquot_release_reservation_block(inode, EXT4_C2B(sbi, used));
	}

	/*
	 * If we have done all the pending block allocations and if
	 * there aren't any writers on the inode, we can discard the
	 * inode's preallocations.
	 */
	if ((ei->i_reserved_data_blocks == 0) &&
	    !inode_is_open_for_write(inode))
		ext4_discard_preallocations(inode, 0);
}

static int __check_block_validity(struct inode *inode, const char *func,
				  unsigned int line,
				  struct ext4_map_blocks *map)
{
	if (ext4_has_feature_journal(inode->i_sb) &&
	    (inode->i_ino ==
	     le32_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_journal_inum)))
		return 0;
	if (!ext4_inode_block_valid(inode, map->m_pblk, map->m_len)) {
		ext4_error_inode(inode, func, line, map->m_pblk,
				 "lblock %lu mapped to illegal pblock %llu "
				 "(length %d)", (unsigned long) map->m_lblk,
				 map->m_pblk, map->m_len);
		return -EFSCORRUPTED;
	}
	return 0;
}

int ext4_issue_zeroout(struct inode *inode, ext4_lblk_t lblk, ext4_fsblk_t pblk,
		       ext4_lblk_t len)
{
	int ret;

	if (IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode))
		return fscrypt_zeroout_range(inode, lblk, pblk, len);

	ret = sb_issue_zeroout(inode->i_sb, pblk, len, GFP_NOFS);
	if (ret > 0)
		ret = 0;

	return ret;
}

#define check_block_validity(inode, map)	\
	__check_block_validity((inode), __func__, __LINE__, (map))

#ifdef ES_AGGRESSIVE_TEST
static void ext4_map_blocks_es_recheck(handle_t *handle,
				       struct inode *inode,
				       struct ext4_map_blocks *es_map,
				       struct ext4_map_blocks *map,
				       int flags)
{
	int retval;

	map->m_flags = 0;
	/*
	 * There is a race window in which the result is not the same.
	 * e.g. xfstests #223 when dioread_nolock is enabled. The reason
	 * is that we look up a block mapping in the extent status tree
	 * without taking i_data_sem. So at that time the unwritten extent
	 * could be converted.
	 */
	down_read(&EXT4_I(inode)->i_data_sem);
	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
		retval = ext4_ext_map_blocks(handle, inode, map, 0);
	} else {
		retval = ext4_ind_map_blocks(handle, inode, map, 0);
	}
	up_read((&EXT4_I(inode)->i_data_sem));

	/*
	 * We don't check m_len because the extent will be collapsed in the
	 * status tree, so the lengths might not be equal.
	 */
	if (es_map->m_lblk != map->m_lblk ||
	    es_map->m_flags != map->m_flags ||
	    es_map->m_pblk != map->m_pblk) {
		printk("ES cache assertion failed for inode: %lu "
		       "es_cached ex [%d/%d/%llu/%x] != "
		       "found ex [%d/%d/%llu/%x] retval %d flags %x\n",
		       inode->i_ino, es_map->m_lblk, es_map->m_len,
		       es_map->m_pblk, es_map->m_flags, map->m_lblk,
		       map->m_len, map->m_pblk, map->m_flags,
		       retval, flags);
	}
}
#endif /* ES_AGGRESSIVE_TEST */

/*
 * The ext4_map_blocks() function tries to look up the requested blocks,
 * and returns if the blocks are already mapped.
 *
 * Otherwise it takes the write lock of the i_data_sem, allocates blocks,
 * stores the allocated blocks in the result buffer head, and marks it
 * mapped.
 *
 * If the file is extent based, it calls ext4_ext_map_blocks();
 * otherwise, it calls ext4_ind_map_blocks() to handle indirect-mapping
 * based files.
 *
 * On success, it returns the number of blocks being mapped or allocated. If
 * create==0 and the blocks are pre-allocated and unwritten, the resulting
 * @map is marked as unwritten. If create == 1, it will mark @map as mapped.
 *
 * It returns 0 if a plain lookup failed (blocks have not been allocated); in
 * that case, @map is returned as unmapped but we still fill in map->m_len to
 * indicate the length of a hole starting at map->m_lblk.
 *
 * It returns the error in case of allocation failure.
 */
int ext4_map_blocks(handle_t *handle, struct inode *inode,
		    struct ext4_map_blocks *map, int flags)
{
	struct extent_status es;
	int retval;
	int ret = 0;
#ifdef ES_AGGRESSIVE_TEST
	struct ext4_map_blocks orig_map;

	memcpy(&orig_map, map, sizeof(*map));
#endif

	map->m_flags = 0;
	ext_debug(inode, "flag 0x%x, max_blocks %u, logical block %lu\n",
		  flags, map->m_len, (unsigned long) map->m_lblk);

	/*
	 * ext4_map_blocks returns an int, and m_len is an unsigned int
	 */
	if (unlikely(map->m_len > INT_MAX))
		map->m_len = INT_MAX;

	/* We can only handle block numbers less than EXT_MAX_BLOCKS */
	if (unlikely(map->m_lblk >= EXT_MAX_BLOCKS))
		return -EFSCORRUPTED;

	/* Look up the extent status tree first */
	if (!(EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY) &&
	    ext4_es_lookup_extent(inode, map->m_lblk, NULL, &es)) {
		if (ext4_es_is_written(&es) || ext4_es_is_unwritten(&es)) {
			map->m_pblk = ext4_es_pblock(&es) +
				      map->m_lblk - es.es_lblk;
			map->m_flags |= ext4_es_is_written(&es) ?
					EXT4_MAP_MAPPED : EXT4_MAP_UNWRITTEN;
			retval = es.es_len - (map->m_lblk - es.es_lblk);
			if (retval > map->m_len)
				retval = map->m_len;
			map->m_len = retval;
		} else if (ext4_es_is_delayed(&es) || ext4_es_is_hole(&es)) {
			map->m_pblk = 0;
			retval = es.es_len - (map->m_lblk - es.es_lblk);
			if (retval > map->m_len)
				retval = map->m_len;
			map->m_len = retval;
			retval = 0;
		} else {
			BUG();
		}
#ifdef ES_AGGRESSIVE_TEST
		ext4_map_blocks_es_recheck(handle, inode, map,
					   &orig_map, flags);
#endif
		goto found;
	}

	/*
	 * Try to see if we can get the block without requesting a new
	 * file system block.
	 */
	down_read(&EXT4_I(inode)->i_data_sem);
	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
		retval = ext4_ext_map_blocks(handle, inode, map, 0);
	} else {
		retval = ext4_ind_map_blocks(handle, inode, map, 0);
	}
	if (retval > 0) {
		unsigned int status;

		if (unlikely(retval != map->m_len)) {
			ext4_warning(inode->i_sb,
				     "ES len assertion failed for inode "
				     "%lu: retval %d != map->m_len %d",
				     inode->i_ino, retval, map->m_len);
			WARN_ON(1);
		}

		status = map->m_flags & EXT4_MAP_UNWRITTEN ?
				EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
		if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) &&
		    !(status & EXTENT_STATUS_WRITTEN) &&
		    ext4_es_scan_range(inode, &ext4_es_is_delayed, map->m_lblk,
				       map->m_lblk + map->m_len - 1))
			status |= EXTENT_STATUS_DELAYED;
		ret = ext4_es_insert_extent(inode, map->m_lblk,
					    map->m_len, map->m_pblk, status);
		if (ret < 0)
			retval = ret;
	}
	up_read((&EXT4_I(inode)->i_data_sem));

found:
	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
		ret = check_block_validity(inode, map);
		if (ret != 0)
			return ret;
	}

	/* If it is only a block(s) look up */
	if ((flags & EXT4_GET_BLOCKS_CREATE) == 0)
		return retval;

	/*
	 * Return if the blocks have already been allocated.
	 *
	 * Note that if blocks have been preallocated,
	 * ext4_ext_get_block() returns with create = 0
	 * and the buffer head unmapped.
	 */
	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED)
		/*
		 * If we need to convert the extent to unwritten
		 * we continue and do the actual work in
		 * ext4_ext_map_blocks()
		 */
		if (!(flags & EXT4_GET_BLOCKS_CONVERT_UNWRITTEN))
			return retval;

	/*
	 * Here we clear m_flags because after allocating a new extent,
	 * it will be set again.
	 */
	map->m_flags &= ~EXT4_MAP_FLAGS;

	/*
	 * New block allocation and/or writing to an unwritten extent
	 * will possibly result in updating i_data, so we take
	 * the write lock of i_data_sem, and call get_block()
	 * with the create == 1 flag.
	 */
	down_write(&EXT4_I(inode)->i_data_sem);

	/*
	 * We need to check for EXT4 here because migrate
	 * could have changed the inode type in between.
	 */
	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
		retval = ext4_ext_map_blocks(handle, inode, map, flags);
	} else {
		retval = ext4_ind_map_blocks(handle, inode, map, flags);

		if (retval > 0 && map->m_flags & EXT4_MAP_NEW) {
			/*
			 * We allocated new blocks which will result in
			 * i_data's format changing. Force the migrate
			 * to fail by clearing migrate flags.
			 */
			ext4_clear_inode_state(inode, EXT4_STATE_EXT_MIGRATE);
		}

		/*
		 * Update reserved blocks/metadata blocks after successful
		 * block allocation which had been deferred till now. We don't
		 * support fallocate for non-extent files. So we can update
		 * the reserved space here.
		 */
		if ((retval > 0) &&
		    (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE))
			ext4_da_update_reserve_space(inode, retval, 1);
	}

	if (retval > 0) {
		unsigned int status;

		if (unlikely(retval != map->m_len)) {
			ext4_warning(inode->i_sb,
				     "ES len assertion failed for inode "
				     "%lu: retval %d != map->m_len %d",
				     inode->i_ino, retval, map->m_len);
			WARN_ON(1);
		}

		/*
		 * We have to zeroout blocks before inserting them into the
		 * extent status tree. Otherwise someone could look them up
		 * there and use them before they are really zeroed. We also
		 * have to unmap metadata before zeroing as otherwise
		 * writeback can overwrite zeros with stale data from the
		 * block device.
		 */
		if (flags & EXT4_GET_BLOCKS_ZERO &&
		    map->m_flags & EXT4_MAP_MAPPED &&
		    map->m_flags & EXT4_MAP_NEW) {
			ret = ext4_issue_zeroout(inode, map->m_lblk,
						 map->m_pblk, map->m_len);
			if (ret) {
				retval = ret;
				goto out_sem;
			}
		}

		/*
		 * If the extent has been zeroed out, we don't need to update
		 * the extent status tree.
		 */
		if ((flags & EXT4_GET_BLOCKS_PRE_IO) &&
		    ext4_es_lookup_extent(inode, map->m_lblk, NULL, &es)) {
			if (ext4_es_is_written(&es))
				goto out_sem;
		}
		status = map->m_flags & EXT4_MAP_UNWRITTEN ?
				EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
		if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) &&
		    !(status & EXTENT_STATUS_WRITTEN) &&
		    ext4_es_scan_range(inode, &ext4_es_is_delayed, map->m_lblk,
				       map->m_lblk + map->m_len - 1))
			status |= EXTENT_STATUS_DELAYED;
		ret = ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
					    map->m_pblk, status);
		if (ret < 0) {
			retval = ret;
			goto out_sem;
		}
	}

out_sem:
	up_write((&EXT4_I(inode)->i_data_sem));
	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
		ret = check_block_validity(inode, map);
		if (ret != 0)
			return ret;

		/*
		 * Inodes with freshly allocated blocks where contents will be
		 * visible after transaction commit must be on the
		 * transaction's ordered data list.
		 */
		if (map->m_flags & EXT4_MAP_NEW &&
		    !(map->m_flags & EXT4_MAP_UNWRITTEN) &&
		    !(flags & EXT4_GET_BLOCKS_ZERO) &&
		    !ext4_is_quota_file(inode) &&
		    ext4_should_order_data(inode)) {
			loff_t start_byte =
				(loff_t)map->m_lblk << inode->i_blkbits;
			loff_t length = (loff_t)map->m_len << inode->i_blkbits;

			if (flags & EXT4_GET_BLOCKS_IO_SUBMIT)
				ret = ext4_jbd2_inode_add_wait(handle, inode,
						start_byte, length);
			else
				ret = ext4_jbd2_inode_add_write(handle, inode,
						start_byte, length);
			if (ret)
				return ret;
		}
		ext4_fc_track_range(handle, inode, map->m_lblk,
				    map->m_lblk + map->m_len - 1);
	}

	if (retval < 0)
		ext_debug(inode, "failed with err %d\n", retval);
	return retval;
}

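/*
 * Editor's sketch (illustrative, not upstream text) of a lookup-only call
 * following the semantics documented above; 'lblk' and 'len' are placeholder
 * names:
 *
 *	struct ext4_map_blocks map = { .m_lblk = lblk, .m_len = len };
 *	int ret = ext4_map_blocks(NULL, inode, &map, 0);
 *
 * ret > 0 means 'ret' blocks are mapped at map.m_pblk (EXT4_MAP_MAPPED or
 * EXT4_MAP_UNWRITTEN set in map.m_flags); ret == 0 means a hole whose length
 * is filled into map.m_len; ret < 0 is an error. A NULL handle is fine here
 * since no blocks are created (see ext4_getblk() below).
 */
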
/*
 * Update EXT4_MAP_FLAGS in bh->b_state. For buffer heads attached to pages
 * we have to be careful as someone else may be manipulating b_state as well.
 */
static void ext4_update_bh_state(struct buffer_head *bh, unsigned long flags)
{
	unsigned long old_state;
	unsigned long new_state;

	flags &= EXT4_MAP_FLAGS;

	/* Dummy buffer_head? Set non-atomically. */
	if (!bh->b_page) {
		bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | flags;
		return;
	}
	/*
	 * Someone else may be modifying b_state. Be careful! This is ugly but
	 * once we get rid of using bh as a container for mapping information
	 * to pass to / from get_block functions, this can go away.
	 */
	do {
		old_state = READ_ONCE(bh->b_state);
		new_state = (old_state & ~EXT4_MAP_FLAGS) | flags;
	} while (unlikely(
		 cmpxchg(&bh->b_state, old_state, new_state) != old_state));
}

static int _ext4_get_block(struct inode *inode, sector_t iblock,
			   struct buffer_head *bh, int flags)
{
	struct ext4_map_blocks map;
	int ret = 0;

	if (ext4_has_inline_data(inode))
		return -ERANGE;

	map.m_lblk = iblock;
	map.m_len = bh->b_size >> inode->i_blkbits;

	ret = ext4_map_blocks(ext4_journal_current_handle(), inode, &map,
			      flags);
	if (ret > 0) {
		map_bh(bh, inode->i_sb, map.m_pblk);
		ext4_update_bh_state(bh, map.m_flags);
		bh->b_size = inode->i_sb->s_blocksize * map.m_len;
		ret = 0;
	} else if (ret == 0) {
		/* hole case, need to fill in bh->b_size */
		bh->b_size = inode->i_sb->s_blocksize * map.m_len;
	}
	return ret;
}

int ext4_get_block(struct inode *inode, sector_t iblock,
		   struct buffer_head *bh, int create)
{
	return _ext4_get_block(inode, iblock, bh,
			       create ? EXT4_GET_BLOCKS_CREATE : 0);
}

/*
 * Get block function used when preparing for buffered write if we require
 * creating an unwritten extent if blocks haven't been allocated. The extent
 * will be converted to written after the IO is complete.
 */
int ext4_get_block_unwritten(struct inode *inode, sector_t iblock,
			     struct buffer_head *bh_result, int create)
{
	ext4_debug("ext4_get_block_unwritten: inode %lu, create flag %d\n",
		   inode->i_ino, create);
	return _ext4_get_block(inode, iblock, bh_result,
			       EXT4_GET_BLOCKS_IO_CREATE_EXT);
}

/* Maximum number of blocks we map for direct IO at once. */
#define DIO_MAX_BLOCKS 4096

/*
 * 'handle' can be NULL if create is zero
 */
struct buffer_head *ext4_getblk(handle_t *handle, struct inode *inode,
				ext4_lblk_t block, int map_flags)
{
	struct ext4_map_blocks map;
	struct buffer_head *bh;
	int create = map_flags & EXT4_GET_BLOCKS_CREATE;
	int err;

	ASSERT((EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY)
		    || handle != NULL || create == 0);

	map.m_lblk = block;
	map.m_len = 1;
	err = ext4_map_blocks(handle, inode, &map, map_flags);

	if (err == 0)
		return create ? ERR_PTR(-ENOSPC) : NULL;
	if (err < 0)
		return ERR_PTR(err);

	bh = sb_getblk(inode->i_sb, map.m_pblk);
	if (unlikely(!bh))
		return ERR_PTR(-ENOMEM);
	if (map.m_flags & EXT4_MAP_NEW) {
		ASSERT(create != 0);
		ASSERT((EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY)
			    || (handle != NULL));

		/*
		 * Now that we do not always journal data, we should
		 * keep in mind whether this should always journal the
		 * new buffer as metadata. For now, regular file
		 * writes use ext4_get_block instead, so it's not a
		 * problem.
		 */
		lock_buffer(bh);
		BUFFER_TRACE(bh, "call get_create_access");
		err = ext4_journal_get_create_access(handle, bh);
		if (unlikely(err)) {
			unlock_buffer(bh);
			goto errout;
		}
		if (!buffer_uptodate(bh)) {
			memset(bh->b_data, 0, inode->i_sb->s_blocksize);
			set_buffer_uptodate(bh);
		}
		unlock_buffer(bh);
		BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
		err = ext4_handle_dirty_metadata(handle, inode, bh);
		if (unlikely(err))
			goto errout;
	} else
		BUFFER_TRACE(bh, "not a new buffer");
	return bh;
errout:
	brelse(bh);
	return ERR_PTR(err);
}

struct buffer_head *ext4_bread(handle_t *handle, struct inode *inode,
			       ext4_lblk_t block, int map_flags)
{
	struct buffer_head *bh;
	int ret;

	bh = ext4_getblk(handle, inode, block, map_flags);
	if (IS_ERR(bh))
		return bh;
	if (!bh || ext4_buffer_uptodate(bh))
		return bh;

	ret = ext4_read_bh_lock(bh, REQ_META | REQ_PRIO, true);
	if (ret) {
		put_bh(bh);
		return ERR_PTR(ret);
	}
	return bh;
}

/* Read a contiguous batch of blocks. */
int ext4_bread_batch(struct inode *inode, ext4_lblk_t block, int bh_count,
		     bool wait, struct buffer_head **bhs)
{
	int i, err;

	for (i = 0; i < bh_count; i++) {
		bhs[i] = ext4_getblk(NULL, inode, block + i, 0 /* map_flags */);
		if (IS_ERR(bhs[i])) {
			err = PTR_ERR(bhs[i]);
			bh_count = i;
			goto out_brelse;
		}
	}

	for (i = 0; i < bh_count; i++)
		/* Note that NULL bhs[i] is valid because of holes. */
		if (bhs[i] && !ext4_buffer_uptodate(bhs[i]))
			ext4_read_bh_lock(bhs[i], REQ_META | REQ_PRIO, false);

	if (!wait)
		return 0;

	for (i = 0; i < bh_count; i++)
		if (bhs[i])
			wait_on_buffer(bhs[i]);

	for (i = 0; i < bh_count; i++) {
		if (bhs[i] && !buffer_uptodate(bhs[i])) {
			err = -EIO;
			goto out_brelse;
		}
	}
	return 0;

out_brelse:
	for (i = 0; i < bh_count; i++) {
		brelse(bhs[i]);
		bhs[i] = NULL;
	}
	return err;
}

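/*
 * Editor's sketch (illustrative) of a typical caller; 'NR' and 'first_lblk'
 * are placeholder names:
 *
 *	struct buffer_head *bhs[NR];
 *	int err = ext4_bread_batch(inode, first_lblk, NR, true, bhs);
 *
 * With wait == true, a zero return means every non-NULL bhs[i] is uptodate
 * (NULL entries correspond to holes) and the caller brelse()s them when
 * done; on error the buffers have already been released and set to NULL.
 */
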
int ext4_walk_page_buffers(handle_t *handle,
			   struct buffer_head *head,
			   unsigned from,
			   unsigned to,
			   int *partial,
			   int (*fn)(handle_t *handle,
				     struct buffer_head *bh))
{
	struct buffer_head *bh;
	unsigned block_start, block_end;
	unsigned blocksize = head->b_size;
	int err, ret = 0;
	struct buffer_head *next;

	for (bh = head, block_start = 0;
	     ret == 0 && (bh != head || !block_start);
	     block_start = block_end, bh = next) {
		next = bh->b_this_page;
		block_end = block_start + blocksize;
		if (block_end <= from || block_start >= to) {
			if (partial && !buffer_uptodate(bh))
				*partial = 1;
			continue;
		}
		err = (*fn)(handle, bh);
		if (!ret)
			ret = err;
	}
	return ret;
}

/*
 * To preserve ordering, it is essential that the hole instantiation and
 * the data write be encapsulated in a single transaction. We cannot
 * close off a transaction and start a new one between the ext4_get_block()
 * and the commit_write(). So doing the jbd2_journal_start at the start of
 * prepare_write() is the right place.
 *
 * Also, this function can nest inside ext4_writepage(). In that case, we
 * *know* that ext4_writepage() has generated enough buffer credits to do the
 * whole page. So we won't block on the journal in that case, which is good,
 * because the caller may be PF_MEMALLOC.
 *
 * By accident, ext4 can be reentered when a transaction is open via
 * quota file writes. If we were to commit the transaction while thus
 * reentered, there can be a deadlock - we would be holding a quota
 * lock, and the commit would never complete if another thread had a
 * transaction open and was blocking on the quota lock - a ranking
 * violation.
 *
 * So what we do is to rely on the fact that jbd2_journal_stop/journal_start
 * will _not_ run commit under these circumstances because handle->h_ref
 * is elevated. We'll still have enough credits for the tiny quotafile
 * write.
 */
int do_journal_get_write_access(handle_t *handle,
				struct buffer_head *bh)
{
	int dirty = buffer_dirty(bh);
	int ret;

	if (!buffer_mapped(bh) || buffer_freed(bh))
		return 0;
	/*
	 * __block_write_begin() could have dirtied some buffers. Clean
	 * the dirty bit as jbd2_journal_get_write_access() could complain
	 * otherwise about fs integrity issues. Setting of the dirty bit
	 * by __block_write_begin() isn't a real problem here as we clear
	 * the bit before releasing a page lock and thus writeback cannot
	 * ever write the buffer.
	 */
	if (dirty)
		clear_buffer_dirty(bh);
	BUFFER_TRACE(bh, "get write access");
	ret = ext4_journal_get_write_access(handle, bh);
	if (!ret && dirty)
		ret = ext4_handle_dirty_metadata(handle, NULL, bh);
	return ret;
}

#ifdef CONFIG_FS_ENCRYPTION
static int ext4_block_write_begin(struct page *page, loff_t pos, unsigned len,
				  get_block_t *get_block)
{
	unsigned from = pos & (PAGE_SIZE - 1);
	unsigned to = from + len;
	struct inode *inode = page->mapping->host;
	unsigned block_start, block_end;
	sector_t block;
	int err = 0;
	unsigned blocksize = inode->i_sb->s_blocksize;
	unsigned bbits;
	struct buffer_head *bh, *head, *wait[2];
	int nr_wait = 0;
	int i;

	BUG_ON(!PageLocked(page));
	BUG_ON(from > PAGE_SIZE);
	BUG_ON(to > PAGE_SIZE);
	BUG_ON(from > to);

	if (!page_has_buffers(page))
		create_empty_buffers(page, blocksize, 0);
	head = page_buffers(page);
	bbits = ilog2(blocksize);
	block = (sector_t)page->index << (PAGE_SHIFT - bbits);

	for (bh = head, block_start = 0; bh != head || !block_start;
	     block++, block_start = block_end, bh = bh->b_this_page) {
		block_end = block_start + blocksize;
		if (block_end <= from || block_start >= to) {
			if (PageUptodate(page)) {
				if (!buffer_uptodate(bh))
					set_buffer_uptodate(bh);
			}
			continue;
		}
		if (buffer_new(bh))
			clear_buffer_new(bh);
		if (!buffer_mapped(bh)) {
			WARN_ON(bh->b_size != blocksize);
			err = get_block(inode, block, bh, 1);
			if (err)
				break;
			if (buffer_new(bh)) {
				if (PageUptodate(page)) {
					clear_buffer_new(bh);
					set_buffer_uptodate(bh);
					mark_buffer_dirty(bh);
					continue;
				}
				if (block_end > to || block_start < from)
					zero_user_segments(page, to, block_end,
							   block_start, from);
				continue;
			}
		}
		if (PageUptodate(page)) {
			if (!buffer_uptodate(bh))
				set_buffer_uptodate(bh);
			continue;
		}
		if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
		    !buffer_unwritten(bh) &&
		    (block_start < from || block_end > to)) {
			ext4_read_bh_lock(bh, 0, false);
			wait[nr_wait++] = bh;
		}
	}
	/*
	 * If we issued read requests, let them complete.
	 */
	for (i = 0; i < nr_wait; i++) {
		wait_on_buffer(wait[i]);
		if (!buffer_uptodate(wait[i]))
			err = -EIO;
	}
	if (unlikely(err)) {
		page_zero_new_buffers(page, from, to);
	} else if (fscrypt_inode_uses_fs_layer_crypto(inode)) {
		for (i = 0; i < nr_wait; i++) {
			int err2;

			err2 = fscrypt_decrypt_pagecache_blocks(page, blocksize,
								bh_offset(wait[i]));
			if (err2) {
				clear_buffer_uptodate(wait[i]);
				err = err2;
			}
		}
	}

	return err;
}
#endif

Nick Pigginbfc1af62007-10-16 01:25:05 -07001133static int ext4_write_begin(struct file *file, struct address_space *mapping,
Theodore Ts'ode9a55b2009-06-14 17:45:34 -04001134 loff_t pos, unsigned len, unsigned flags,
1135 struct page **pagep, void **fsdata)
Dave Kleikampac27a0e2006-10-11 01:20:50 -07001136{
Theodore Ts'oaf5bc922008-09-08 22:25:24 -04001137 struct inode *inode = mapping->host;
Aneesh Kumar K.V1938a152009-06-05 01:00:26 -04001138 int ret, needed_blocks;
Dave Kleikampac27a0e2006-10-11 01:20:50 -07001139 handle_t *handle;
1140 int retries = 0;
Theodore Ts'oaf5bc922008-09-08 22:25:24 -04001141 struct page *page;
Theodore Ts'ode9a55b2009-06-14 17:45:34 -04001142 pgoff_t index;
Theodore Ts'oaf5bc922008-09-08 22:25:24 -04001143 unsigned from, to;
Nick Pigginbfc1af62007-10-16 01:25:05 -07001144
Theodore Ts'o0db1ff22017-02-05 01:28:48 -05001145 if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
1146 return -EIO;
1147
Theodore Ts'o9bffad12009-06-17 11:48:11 -04001148 trace_ext4_write_begin(inode, pos, len, flags);
Aneesh Kumar K.V1938a152009-06-05 01:00:26 -04001149 /*
1150 * Reserve one block more for addition to the orphan list in case
1151 * we allocate blocks but the write fails for some reason
1152 */
1153 needed_blocks = ext4_writepage_trans_blocks(inode) + 1;
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001154 index = pos >> PAGE_SHIFT;
1155 from = pos & (PAGE_SIZE - 1);
Theodore Ts'oaf5bc922008-09-08 22:25:24 -04001156 to = from + len;
Dave Kleikampac27a0e2006-10-11 01:20:50 -07001157
Tao Maf19d5872012-12-10 14:05:51 -05001158 if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) {
1159 ret = ext4_try_to_write_inline_data(mapping, inode, pos, len,
1160 flags, pagep);
1161 if (ret < 0)
Theodore Ts'o47564bf2013-02-09 09:24:14 -05001162 return ret;
1163 if (ret == 1)
1164 return 0;
Tao Maf19d5872012-12-10 14:05:51 -05001165 }
1166
Theodore Ts'o47564bf2013-02-09 09:24:14 -05001167 /*
1168 * grab_cache_page_write_begin() can take a long time if the
1169 * system is thrashing due to memory pressure, or if the page
1170 * is being written back. So grab it first before we start
1171 * the transaction handle. This also allows us to allocate
1172 * the page (if needed) without using GFP_NOFS.
1173 */
1174retry_grab:
Nick Piggin54566b22009-01-04 12:00:53 -08001175 page = grab_cache_page_write_begin(mapping, index, flags);
Theodore Ts'o47564bf2013-02-09 09:24:14 -05001176 if (!page)
1177 return -ENOMEM;
1178 unlock_page(page);
1179
1180retry_journal:
Theodore Ts'o9924a922013-02-08 21:59:22 -05001181 handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE, needed_blocks);
Dave Kleikampac27a0e2006-10-11 01:20:50 -07001182 if (IS_ERR(handle)) {
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001183 put_page(page);
Theodore Ts'o47564bf2013-02-09 09:24:14 -05001184 return PTR_ERR(handle);
Jan Karacf108bc2008-07-11 19:27:31 -04001185 }
Tao Maf19d5872012-12-10 14:05:51 -05001186
Theodore Ts'o47564bf2013-02-09 09:24:14 -05001187 lock_page(page);
1188 if (page->mapping != mapping) {
1189 /* The page got truncated from under us */
1190 unlock_page(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001191 put_page(page);
Jan Karacf108bc2008-07-11 19:27:31 -04001192 ext4_journal_stop(handle);
Theodore Ts'o47564bf2013-02-09 09:24:14 -05001193 goto retry_grab;
Jan Karacf108bc2008-07-11 19:27:31 -04001194 }
Dmitry Monakhov7afe5aa2013-08-28 14:30:47 -04001195 /* In case writeback began while the page was unlocked */
1196 wait_for_stable_page(page);
Jan Karacf108bc2008-07-11 19:27:31 -04001197
Chandan Rajendra643fa962018-12-12 15:20:12 +05301198#ifdef CONFIG_FS_ENCRYPTION
Michael Halcrow2058f832015-04-12 00:55:10 -04001199 if (ext4_should_dioread_nolock(inode))
1200 ret = ext4_block_write_begin(page, pos, len,
Jan Kara705965b2016-03-08 23:08:10 -05001201 ext4_get_block_unwritten);
Michael Halcrow2058f832015-04-12 00:55:10 -04001202 else
1203 ret = ext4_block_write_begin(page, pos, len,
1204 ext4_get_block);
1205#else
Jiaying Zhang744692d2010-03-04 16:14:02 -05001206 if (ext4_should_dioread_nolock(inode))
Jan Kara705965b2016-03-08 23:08:10 -05001207 ret = __block_write_begin(page, pos, len,
1208 ext4_get_block_unwritten);
Jiaying Zhang744692d2010-03-04 16:14:02 -05001209 else
Christoph Hellwig6e1db882010-06-04 11:29:57 +02001210 ret = __block_write_begin(page, pos, len, ext4_get_block);
Michael Halcrow2058f832015-04-12 00:55:10 -04001211#endif
Nick Pigginbfc1af62007-10-16 01:25:05 -07001212 if (!ret && ext4_should_journal_data(inode)) {
Tao Maf19d5872012-12-10 14:05:51 -05001213 ret = ext4_walk_page_buffers(handle, page_buffers(page),
1214 from, to, NULL,
1215 do_journal_get_write_access);
Dave Kleikampac27a0e2006-10-11 01:20:50 -07001216 }
Nick Pigginbfc1af62007-10-16 01:25:05 -07001217
1218 if (ret) {
Eric Biggersc93d8f82019-07-22 09:26:24 -07001219 bool extended = (pos + len > inode->i_size) &&
1220 !ext4_verity_in_progress(inode);
1221
Theodore Ts'oaf5bc922008-09-08 22:25:24 -04001222 unlock_page(page);
Aneesh Kumar K.Vae4d5372008-09-13 13:10:25 -04001223 /*
Christoph Hellwig6e1db882010-06-04 11:29:57 +02001224 * __block_write_begin may have instantiated a few blocks
Aneesh Kumar K.Vae4d5372008-09-13 13:10:25 -04001225 * outside i_size. Trim these off again. Don't need
1226 * i_size_read because we hold i_mutex.
Aneesh Kumar K.V1938a152009-06-05 01:00:26 -04001227 *
1228 * Add inode to orphan list in case we crash before
1229 * truncate finishes
Aneesh Kumar K.Vae4d5372008-09-13 13:10:25 -04001230 */
Eric Biggersc93d8f82019-07-22 09:26:24 -07001231 if (extended && ext4_can_truncate(inode))
Aneesh Kumar K.V1938a152009-06-05 01:00:26 -04001232 ext4_orphan_add(handle, inode);
1233
1234 ext4_journal_stop(handle);
Eric Biggersc93d8f82019-07-22 09:26:24 -07001235 if (extended) {
Jan Karab9a42072009-12-08 21:24:33 -05001236 ext4_truncate_failed_write(inode);
Theodore Ts'ode9a55b2009-06-14 17:45:34 -04001237 /*
Jan Karaffacfa72009-07-13 16:22:22 -04001238 * If truncate failed early the inode might
Aneesh Kumar K.V1938a152009-06-05 01:00:26 -04001239 * still be on the orphan list; we need to
1240 * make sure the inode is removed from the
1241 * orphan list in that case.
1242 */
1243 if (inode->i_nlink)
1244 ext4_orphan_del(NULL, inode);
1245 }
Nick Pigginbfc1af62007-10-16 01:25:05 -07001246
Theodore Ts'o47564bf2013-02-09 09:24:14 -05001247 if (ret == -ENOSPC &&
1248 ext4_should_retry_alloc(inode->i_sb, &retries))
1249 goto retry_journal;
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001250 put_page(page);
Theodore Ts'o47564bf2013-02-09 09:24:14 -05001251 return ret;
1252 }
1253 *pagep = page;
Dave Kleikampac27a0e2006-10-11 01:20:50 -07001254 return ret;
1255}
1256
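/*
 * Editor's sketch (illustrative, not part of the build): the ordering
 * pattern ext4_write_begin() above relies on -- grab the page before
 * starting the journal handle (so the page allocation need not use
 * GFP_NOFS), drop the page lock across the journal start, then re-lock
 * and revalidate the mapping. All identifiers refer to the code above.
 */
#if 0
retry:
	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page)
		return -ENOMEM;
	unlock_page(page);	/* transaction start ranks above page lock */

	handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE, needed_blocks);
	if (IS_ERR(handle)) {
		put_page(page);
		return PTR_ERR(handle);
	}

	lock_page(page);
	if (page->mapping != mapping) {	/* truncated from under us? */
		unlock_page(page);
		put_page(page);
		ext4_journal_stop(handle);
		goto retry;
	}
	wait_for_stable_page(page);
#endif
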
Nick Pigginbfc1af62007-10-16 01:25:05 -07001257/* For write_end() in data=journal mode */
1258static int write_end_fn(handle_t *handle, struct buffer_head *bh)
Dave Kleikampac27a0e2006-10-11 01:20:50 -07001259{
Theodore Ts'o13fca322013-04-21 16:45:54 -04001260 int ret;
Dave Kleikampac27a0e2006-10-11 01:20:50 -07001261 if (!buffer_mapped(bh) || buffer_freed(bh))
1262 return 0;
1263 set_buffer_uptodate(bh);
Theodore Ts'o13fca322013-04-21 16:45:54 -04001264 ret = ext4_handle_dirty_metadata(handle, NULL, bh);
1265 clear_buffer_meta(bh);
1266 clear_buffer_prio(bh);
1267 return ret;
Dave Kleikampac27a0e2006-10-11 01:20:50 -07001268}
1269
Zheng Liueed43332013-04-03 12:41:17 -04001270/*
1271 * We need to pick up the new inode size which generic_commit_write gave us.
1272 * `file' can be NULL - e.g., when called from page_symlink().
1273 *
1274 * ext4 never places buffers on inode->i_mapping->private_list; metadata
1275 * buffers are managed internally.
1276 */
1277static int ext4_write_end(struct file *file,
1278 struct address_space *mapping,
1279 loff_t pos, unsigned len, unsigned copied,
1280 struct page *page, void *fsdata)
Aneesh Kumar K.Vf8514082009-06-05 00:56:49 -04001281{
Aneesh Kumar K.Vf8514082009-06-05 00:56:49 -04001282 handle_t *handle = ext4_journal_current_handle();
Zheng Liueed43332013-04-03 12:41:17 -04001283 struct inode *inode = mapping->host;
Xiaoguang Wang05726392015-02-12 23:00:17 -05001284 loff_t old_size = inode->i_size;
Zheng Liueed43332013-04-03 12:41:17 -04001285 int ret = 0, ret2;
1286 int i_size_changed = 0;
Theodore Ts'o362eca72018-07-10 01:07:43 -04001287 int inline_data = ext4_has_inline_data(inode);
Eric Biggersc93d8f82019-07-22 09:26:24 -07001288 bool verity = ext4_verity_in_progress(inode);
Zheng Liueed43332013-04-03 12:41:17 -04001289
1290 trace_ext4_write_end(inode, pos, len, copied);
Theodore Ts'o362eca72018-07-10 01:07:43 -04001291 if (inline_data) {
Theodore Ts'o42c832d2013-07-01 08:12:39 -04001292 ret = ext4_write_inline_data_end(inode, pos, len,
1293 copied, page);
Theodore Ts'oeb5efbc2017-02-04 23:04:00 -05001294 if (ret < 0) {
1295 unlock_page(page);
1296 put_page(page);
Theodore Ts'o42c832d2013-07-01 08:12:39 -04001297 goto errout;
Theodore Ts'oeb5efbc2017-02-04 23:04:00 -05001298 }
Theodore Ts'o42c832d2013-07-01 08:12:39 -04001299 copied = ret;
1300 } else
Tao Maf19d5872012-12-10 14:05:51 -05001301 copied = block_write_end(file, mapping, pos,
1302 len, copied, page, fsdata);
Aneesh Kumar K.Vf8514082009-06-05 00:56:49 -04001303 /*
Dmitry Monakhov4631dbf2014-08-23 17:48:28 -04001304 * it's important to update i_size while still holding page lock:
Aneesh Kumar K.Vf8514082009-06-05 00:56:49 -04001305 * page writeout could otherwise come in and zero beyond i_size.
Eric Biggersc93d8f82019-07-22 09:26:24 -07001306 *
1307 * If FS_IOC_ENABLE_VERITY is running on this inode, then Merkle tree
1308 * blocks are being written past EOF, so skip the i_size update.
Aneesh Kumar K.Vf8514082009-06-05 00:56:49 -04001309 */
Eric Biggersc93d8f82019-07-22 09:26:24 -07001310 if (!verity)
1311 i_size_changed = ext4_update_inode_size(inode, pos + copied);
Aneesh Kumar K.Vf8514082009-06-05 00:56:49 -04001312 unlock_page(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001313 put_page(page);
Aneesh Kumar K.Vf8514082009-06-05 00:56:49 -04001314
Eric Biggersc93d8f82019-07-22 09:26:24 -07001315 if (old_size < pos && !verity)
Xiaoguang Wang05726392015-02-12 23:00:17 -05001316 pagecache_isize_extended(inode, old_size, pos);
Aneesh Kumar K.Vf8514082009-06-05 00:56:49 -04001317 /*
1318 * Don't mark the inode dirty under page lock. First, it unnecessarily
1319 * makes the holding time of page lock longer. Second, it forces lock
1320 * ordering of page lock and transaction start for journaling
1321 * filesystems.
1322 */
Theodore Ts'o362eca72018-07-10 01:07:43 -04001323 if (i_size_changed || inline_data)
Harshad Shirwadkar4209ae12020-04-26 18:34:37 -07001324 ret = ext4_mark_inode_dirty(handle, inode);
Aneesh Kumar K.Vf8514082009-06-05 00:56:49 -04001325
Eric Biggersc93d8f82019-07-22 09:26:24 -07001326 if (pos + len > inode->i_size && !verity && ext4_can_truncate(inode))
Aneesh Kumar K.Vf8514082009-06-05 00:56:49 -04001327 /* If we have allocated more blocks and copied
1328 * less, we will have blocks allocated outside
1329 * inode->i_size, so truncate them.
1330 */
1331 ext4_orphan_add(handle, inode);
Theodore Ts'o74d553a2013-04-03 12:39:17 -04001332errout:
Mingming Cao617ba132006-10-11 01:20:53 -07001333 ret2 = ext4_journal_stop(handle);
Dave Kleikampac27a0e2006-10-11 01:20:50 -07001334 if (!ret)
1335 ret = ret2;
Nick Pigginbfc1af62007-10-16 01:25:05 -07001336
Eric Biggersc93d8f82019-07-22 09:26:24 -07001337 if (pos + len > inode->i_size && !verity) {
Jan Karab9a42072009-12-08 21:24:33 -05001338 ext4_truncate_failed_write(inode);
Theodore Ts'ode9a55b2009-06-14 17:45:34 -04001339 /*
Jan Karaffacfa72009-07-13 16:22:22 -04001340 * If truncate failed early the inode might still be
Aneesh Kumar K.Vf8514082009-06-05 00:56:49 -04001341 * on the orphan list; we need to make sure the inode
1342 * is removed from the orphan list in that case.
1343 */
1344 if (inode->i_nlink)
1345 ext4_orphan_del(NULL, inode);
1346 }
1347
Nick Pigginbfc1af62007-10-16 01:25:05 -07001348 return ret ? ret : copied;
Dave Kleikampac27a0e2006-10-11 01:20:50 -07001349}
1350
Theodore Ts'ob90197b2015-10-15 10:29:05 -04001351/*
1352 * This is a private version of page_zero_new_buffers() which doesn't
1353 * set the buffer to be dirty, since in data=journalled mode we need
1354 * to call ext4_handle_dirty_metadata() instead.
1355 */
Jan Kara3b136492017-01-27 14:35:38 -05001356static void ext4_journalled_zero_new_buffers(handle_t *handle,
1357 struct page *page,
1358 unsigned from, unsigned to)
Theodore Ts'ob90197b2015-10-15 10:29:05 -04001359{
1360 unsigned int block_start = 0, block_end;
1361 struct buffer_head *head, *bh;
1362
1363 bh = head = page_buffers(page);
1364 do {
1365 block_end = block_start + bh->b_size;
1366 if (buffer_new(bh)) {
1367 if (block_end > from && block_start < to) {
1368 if (!PageUptodate(page)) {
1369 unsigned start, size;
1370
1371 start = max(from, block_start);
1372 size = min(to, block_end) - start;
1373
1374 zero_user(page, start, size);
Jan Kara3b136492017-01-27 14:35:38 -05001375 write_end_fn(handle, bh);
Theodore Ts'ob90197b2015-10-15 10:29:05 -04001376 }
1377 clear_buffer_new(bh);
1378 }
1379 }
1380 block_start = block_end;
1381 bh = bh->b_this_page;
1382 } while (bh != head);
1383}
1384
Nick Pigginbfc1af62007-10-16 01:25:05 -07001385static int ext4_journalled_write_end(struct file *file,
Theodore Ts'ode9a55b2009-06-14 17:45:34 -04001386 struct address_space *mapping,
1387 loff_t pos, unsigned len, unsigned copied,
1388 struct page *page, void *fsdata)
Dave Kleikampac27a0e2006-10-11 01:20:50 -07001389{
Mingming Cao617ba132006-10-11 01:20:53 -07001390 handle_t *handle = ext4_journal_current_handle();
Nick Pigginbfc1af62007-10-16 01:25:05 -07001391 struct inode *inode = mapping->host;
Xiaoguang Wang05726392015-02-12 23:00:17 -05001392 loff_t old_size = inode->i_size;
Dave Kleikampac27a0e2006-10-11 01:20:50 -07001393 int ret = 0, ret2;
1394 int partial = 0;
Nick Pigginbfc1af62007-10-16 01:25:05 -07001395 unsigned from, to;
Dmitry Monakhov4631dbf2014-08-23 17:48:28 -04001396 int size_changed = 0;
Theodore Ts'o362eca72018-07-10 01:07:43 -04001397 int inline_data = ext4_has_inline_data(inode);
Eric Biggersc93d8f82019-07-22 09:26:24 -07001398 bool verity = ext4_verity_in_progress(inode);
Dave Kleikampac27a0e2006-10-11 01:20:50 -07001399
Theodore Ts'o9bffad12009-06-17 11:48:11 -04001400 trace_ext4_journalled_write_end(inode, pos, len, copied);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001401 from = pos & (PAGE_SIZE - 1);
Nick Pigginbfc1af62007-10-16 01:25:05 -07001402 to = from + len;
1403
Curt Wohlgemuth441c8502011-08-13 11:25:18 -04001404 BUG_ON(!ext4_handle_valid(handle));
1405
Theodore Ts'o362eca72018-07-10 01:07:43 -04001406 if (inline_data) {
Theodore Ts'oeb5efbc2017-02-04 23:04:00 -05001407 ret = ext4_write_inline_data_end(inode, pos, len,
1408 copied, page);
1409 if (ret < 0) {
1410 unlock_page(page);
1411 put_page(page);
1412 goto errout;
1413 }
1414 copied = ret;
1415 } else if (unlikely(copied < len) && !PageUptodate(page)) {
Jan Kara3b136492017-01-27 14:35:38 -05001416 copied = 0;
1417 ext4_journalled_zero_new_buffers(handle, page, from, to);
1418 } else {
1419 if (unlikely(copied < len))
1420 ext4_journalled_zero_new_buffers(handle, page,
1421 from + copied, to);
Tao Ma3fdcfb62012-12-10 14:05:57 -05001422 ret = ext4_walk_page_buffers(handle, page_buffers(page), from,
Jan Kara3b136492017-01-27 14:35:38 -05001423 from + copied, &partial,
1424 write_end_fn);
Tao Ma3fdcfb62012-12-10 14:05:57 -05001425 if (!partial)
1426 SetPageUptodate(page);
1427 }
Eric Biggersc93d8f82019-07-22 09:26:24 -07001428 if (!verity)
1429 size_changed = ext4_update_inode_size(inode, pos + copied);
Theodore Ts'o19f5fb72010-01-24 14:34:07 -05001430 ext4_set_inode_state(inode, EXT4_STATE_JDATA);
Jan Kara2d859db2011-07-26 09:07:11 -04001431 EXT4_I(inode)->i_datasync_tid = handle->h_transaction->t_tid;
Dmitry Monakhov4631dbf2014-08-23 17:48:28 -04001432 unlock_page(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001433 put_page(page);
Dmitry Monakhov4631dbf2014-08-23 17:48:28 -04001434
Eric Biggersc93d8f82019-07-22 09:26:24 -07001435 if (old_size < pos && !verity)
Xiaoguang Wang05726392015-02-12 23:00:17 -05001436 pagecache_isize_extended(inode, old_size, pos);
1437
Theodore Ts'o362eca72018-07-10 01:07:43 -04001438 if (size_changed || inline_data) {
Mingming Cao617ba132006-10-11 01:20:53 -07001439 ret2 = ext4_mark_inode_dirty(handle, inode);
Dave Kleikampac27a0e2006-10-11 01:20:50 -07001440 if (!ret)
1441 ret = ret2;
1442 }
Nick Pigginbfc1af62007-10-16 01:25:05 -07001443
Eric Biggersc93d8f82019-07-22 09:26:24 -07001444 if (pos + len > inode->i_size && !verity && ext4_can_truncate(inode))
Aneesh Kumar K.Vf8514082009-06-05 00:56:49 -04001445 /* If we have allocated more blocks and copied
1446 * less, we will have blocks allocated outside
1447 * inode->i_size, so truncate them.
1448 */
1449 ext4_orphan_add(handle, inode);
1450
Theodore Ts'oeb5efbc2017-02-04 23:04:00 -05001451errout:
Mingming Cao617ba132006-10-11 01:20:53 -07001452 ret2 = ext4_journal_stop(handle);
Dave Kleikampac27a0e2006-10-11 01:20:50 -07001453 if (!ret)
1454 ret = ret2;
Eric Biggersc93d8f82019-07-22 09:26:24 -07001455 if (pos + len > inode->i_size && !verity) {
Jan Karab9a42072009-12-08 21:24:33 -05001456 ext4_truncate_failed_write(inode);
Theodore Ts'ode9a55b2009-06-14 17:45:34 -04001457 /*
Jan Karaffacfa72009-07-13 16:22:22 -04001458 * If truncate failed early the inode might still be
Aneesh Kumar K.Vf8514082009-06-05 00:56:49 -04001459 * on the orphan list; we need to make sure the inode
1460 * is removed from the orphan list in that case.
1461 */
1462 if (inode->i_nlink)
1463 ext4_orphan_del(NULL, inode);
1464 }
Nick Pigginbfc1af62007-10-16 01:25:05 -07001465
1466 return ret ? ret : copied;
Dave Kleikampac27a0e2006-10-11 01:20:50 -07001467}
Mingming Caod2a17632008-07-14 17:52:37 -04001468
Theodore Ts'o9d0be502010-01-01 02:41:30 -05001469/*
Eric Whitneyc27e43a2015-06-21 21:37:05 -04001470 * Reserve space for a single cluster
Theodore Ts'o9d0be502010-01-01 02:41:30 -05001471 */
Eric Whitneyc27e43a2015-06-21 21:37:05 -04001472static int ext4_da_reserve_space(struct inode *inode)
Mingming Caod2a17632008-07-14 17:52:37 -04001473{
Mingming Cao60e58e02009-01-22 18:13:05 +01001474 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
Theodore Ts'o0637c6f2009-12-30 14:20:45 -05001475 struct ext4_inode_info *ei = EXT4_I(inode);
Christoph Hellwig5dd40562010-03-03 09:05:00 -05001476 int ret;
Mingming Caod2a17632008-07-14 17:52:37 -04001477
Mingming Cao60e58e02009-01-22 18:13:05 +01001478 /*
Eric Sandeen72b8ab92010-05-16 11:00:00 -04001479 * We will charge metadata quota at writeout time; this saves
1480 * us from metadata over-estimation, though we may go over by
1481 * a small amount in the end. Here we just reserve for data.
Mingming Cao60e58e02009-01-22 18:13:05 +01001482 */
Aditya Kali7b415bf2011-09-09 19:04:51 -04001483 ret = dquot_reserve_block(inode, EXT4_C2B(sbi, 1));
Christoph Hellwig5dd40562010-03-03 09:05:00 -05001484 if (ret)
1485 return ret;
Theodore Ts'o03179fe2012-07-23 00:00:20 -04001486
Theodore Ts'o03179fe2012-07-23 00:00:20 -04001487 spin_lock(&ei->i_block_reservation_lock);
Theodore Ts'o71d4f7d2014-07-15 06:02:38 -04001488 if (ext4_claim_free_clusters(sbi, 1, 0)) {
Theodore Ts'o03179fe2012-07-23 00:00:20 -04001489 spin_unlock(&ei->i_block_reservation_lock);
Theodore Ts'o03179fe2012-07-23 00:00:20 -04001490 dquot_release_reservation_block(inode, EXT4_C2B(sbi, 1));
Mingming Caod2a17632008-07-14 17:52:37 -04001491 return -ENOSPC;
1492 }
Theodore Ts'o9d0be502010-01-01 02:41:30 -05001493 ei->i_reserved_data_blocks++;
Eric Whitneyc27e43a2015-06-21 21:37:05 -04001494 trace_ext4_da_reserve_space(inode);
Theodore Ts'o0637c6f2009-12-30 14:20:45 -05001495 spin_unlock(&ei->i_block_reservation_lock);
Dmitry Monakhov39bc6802009-12-10 16:36:27 +00001496
Mingming Caod2a17632008-07-14 17:52:37 -04001497 return 0; /* success */
1498}
1499
Eric Whitneyf4567672018-10-01 14:33:24 -04001500void ext4_da_release_space(struct inode *inode, int to_free)
Mingming Caod2a17632008-07-14 17:52:37 -04001501{
1502 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
Theodore Ts'o0637c6f2009-12-30 14:20:45 -05001503 struct ext4_inode_info *ei = EXT4_I(inode);
Mingming Caod2a17632008-07-14 17:52:37 -04001504
Mingming Caocd213222008-08-19 22:16:59 -04001505 if (!to_free)
1506 return; /* Nothing to release, exit */
1507
Mingming Caod2a17632008-07-14 17:52:37 -04001508 spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
Mingming Caocd213222008-08-19 22:16:59 -04001509
Li Zefan5a58ec872010-05-17 02:00:00 -04001510 trace_ext4_da_release_space(inode, to_free);
Theodore Ts'o0637c6f2009-12-30 14:20:45 -05001511 if (unlikely(to_free > ei->i_reserved_data_blocks)) {
Mingming Caocd213222008-08-19 22:16:59 -04001512 /*
Theodore Ts'o0637c6f2009-12-30 14:20:45 -05001513 * if there aren't enough reserved blocks, then the
1514 * counter is messed up somewhere. Since this
1515 * function is called from the invalidatepage path, it's
1516 * harmless to return without any action.
Mingming Caocd213222008-08-19 22:16:59 -04001517 */
Theodore Ts'o8de5c322013-02-14 15:11:41 -05001518 ext4_warning(inode->i_sb, "ext4_da_release_space: "
Theodore Ts'o0637c6f2009-12-30 14:20:45 -05001519 "ino %lu, to_free %d with only %d reserved "
Theodore Ts'o1084f252012-03-19 23:13:43 -04001520 "data blocks", inode->i_ino, to_free,
Theodore Ts'o0637c6f2009-12-30 14:20:45 -05001521 ei->i_reserved_data_blocks);
1522 WARN_ON(1);
1523 to_free = ei->i_reserved_data_blocks;
1524 }
1525 ei->i_reserved_data_blocks -= to_free;
1526
Eric Sandeen72b8ab92010-05-16 11:00:00 -04001527 /* update fs dirty data blocks counter */
Theodore Ts'o57042652011-09-09 18:56:51 -04001528 percpu_counter_sub(&sbi->s_dirtyclusters_counter, to_free);
Mingming Caod2a17632008-07-14 17:52:37 -04001529
Mingming Caod2a17632008-07-14 17:52:37 -04001530 spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
Mingming Cao60e58e02009-01-22 18:13:05 +01001531
Aditya Kali7b415bf2011-09-09 19:04:51 -04001532 dquot_release_reservation_block(inode, EXT4_C2B(sbi, to_free));
Mingming Caod2a17632008-07-14 17:52:37 -04001533}
1534
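/*
 * Editor's note: the reserve/release pair above accounts in two units --
 * clusters in the in-memory counters, blocks in the quota calls, hence
 * the EXT4_C2B() conversions. A minimal sketch of that arithmetic; the
 * ratio of 16 is an assumed example, the real value comes from the
 * superblock (s_cluster_ratio).
 */
#if 0
	unsigned int cluster_ratio = 16;	/* e.g. 4K blocks, 64K clusters */
	unsigned int clusters = 1;
	unsigned int quota_blocks = clusters * cluster_ratio;	/* EXT4_C2B() */
	/*
	 * Reserving one cluster charges 16 blocks of quota; releasing
	 * to_free clusters returns to_free * 16 quota blocks, matching
	 * ext4_da_release_space() above.
	 */
#endif
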
Dave Kleikampac27a0e2006-10-11 01:20:50 -07001535/*
Alex Tomas64769242008-07-11 19:27:31 -04001536 * Delayed allocation stuff
1537 */
1538
Jan Kara4e7ea812013-06-04 13:17:40 -04001539struct mpage_da_data {
1540 struct inode *inode;
1541 struct writeback_control *wbc;
Jan Kara6b523df2013-06-04 13:21:11 -04001542
Jan Kara4e7ea812013-06-04 13:17:40 -04001543 pgoff_t first_page; /* The first page to write */
1544 pgoff_t next_page; /* Current page to examine */
1545 pgoff_t last_page; /* Last page to examine */
Aneesh Kumar K.V791b7f02009-01-05 21:50:43 -05001546 /*
Jan Kara4e7ea812013-06-04 13:17:40 -04001547 * Extent to map - this can be after first_page because that can be
1548 * fully mapped. We somewhat abuse m_flags to store whether the extent
1549 * is delalloc or unwritten.
Aneesh Kumar K.V791b7f02009-01-05 21:50:43 -05001550 */
Jan Kara4e7ea812013-06-04 13:17:40 -04001551 struct ext4_map_blocks map;
1552 struct ext4_io_submit io_submit; /* IO submission data */
Jan Karadddbd6a2017-04-30 18:29:10 -04001553 unsigned int do_map:1;
Jan Kara6b8ed622020-05-25 10:12:15 +02001554 unsigned int scanned_until_end:1;
Jan Kara4e7ea812013-06-04 13:17:40 -04001555};
Alex Tomas64769242008-07-11 19:27:31 -04001556
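/*
 * Editor's sketch: roughly how the writeback path (the ext4_writepages()
 * machinery later in this file) could fill in this descriptor for a
 * ranged writeback. Field values are illustrative assumptions, not the
 * exact logic of the real caller.
 */
#if 0
	struct mpage_da_data mpd = {
		.inode		= inode,
		.wbc		= wbc,
		.first_page	= wbc->range_start >> PAGE_SHIFT,
		.last_page	= wbc->range_end >> PAGE_SHIFT,
		.scanned_until_end = 0,
	};
#endif
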
Jan Kara4e7ea812013-06-04 13:17:40 -04001557static void mpage_release_unused_pages(struct mpage_da_data *mpd,
1558 bool invalidate)
Aneesh Kumar K.Vc4a0c462008-08-19 21:08:18 -04001559{
1560 int nr_pages, i;
1561 pgoff_t index, end;
1562 struct pagevec pvec;
1563 struct inode *inode = mpd->inode;
1564 struct address_space *mapping = inode->i_mapping;
Jan Kara4e7ea812013-06-04 13:17:40 -04001565
1566 /* This is necessary when next_page == 0. */
1567 if (mpd->first_page >= mpd->next_page)
1568 return;
Aneesh Kumar K.Vc4a0c462008-08-19 21:08:18 -04001569
Jan Kara6b8ed622020-05-25 10:12:15 +02001570 mpd->scanned_until_end = 0;
Curt Wohlgemuthc7f59382011-02-26 12:27:52 -05001571 index = mpd->first_page;
1572 end = mpd->next_page - 1;
Jan Kara4e7ea812013-06-04 13:17:40 -04001573 if (invalidate) {
1574 ext4_lblk_t start, last;
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001575 start = index << (PAGE_SHIFT - inode->i_blkbits);
1576 last = end << (PAGE_SHIFT - inode->i_blkbits);
Jan Kara4e7ea812013-06-04 13:17:40 -04001577 ext4_es_remove_extent(inode, start, last - start + 1);
1578 }
Zheng Liu51865fd2012-11-08 21:57:32 -05001579
Mel Gorman86679822017-11-15 17:37:52 -08001580 pagevec_init(&pvec);
Aneesh Kumar K.Vc4a0c462008-08-19 21:08:18 -04001581 while (index <= end) {
Jan Kara397162f2017-09-06 16:21:43 -07001582 nr_pages = pagevec_lookup_range(&pvec, mapping, &index, end);
Aneesh Kumar K.Vc4a0c462008-08-19 21:08:18 -04001583 if (nr_pages == 0)
1584 break;
1585 for (i = 0; i < nr_pages; i++) {
1586 struct page *page = pvec.pages[i];
Jan Kara2b85a612017-09-06 16:21:30 -07001587
Aneesh Kumar K.Vc4a0c462008-08-19 21:08:18 -04001588 BUG_ON(!PageLocked(page));
1589 BUG_ON(PageWriteback(page));
Jan Kara4e7ea812013-06-04 13:17:40 -04001590 if (invalidate) {
wangguang4e800c02016-09-15 11:32:46 -04001591 if (page_mapped(page))
1592 clear_page_dirty_for_io(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001593 block_invalidatepage(page, 0, PAGE_SIZE);
Jan Kara4e7ea812013-06-04 13:17:40 -04001594 ClearPageUptodate(page);
1595 }
Aneesh Kumar K.Vc4a0c462008-08-19 21:08:18 -04001596 unlock_page(page);
1597 }
Jan Kara9b1d09982010-03-03 16:19:32 -05001598 pagevec_release(&pvec);
Aneesh Kumar K.Vc4a0c462008-08-19 21:08:18 -04001599 }
Aneesh Kumar K.Vc4a0c462008-08-19 21:08:18 -04001600}
1601
Aneesh Kumar K.Vdf222912008-09-08 23:05:34 -04001602static void ext4_print_free_blocks(struct inode *inode)
1603{
1604 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
Theodore Ts'o92b97812012-03-19 23:41:49 -04001605 struct super_block *sb = inode->i_sb;
Lukas Czernerf78ee70d2013-04-03 23:33:30 -04001606 struct ext4_inode_info *ei = EXT4_I(inode);
Theodore Ts'o92b97812012-03-19 23:41:49 -04001607
1608 ext4_msg(sb, KERN_CRIT, "Total free blocks count %lld",
Theodore Ts'o5dee5432011-09-09 19:10:51 -04001609 EXT4_C2B(EXT4_SB(inode->i_sb),
Lukas Czernerf78ee70d2013-04-03 23:33:30 -04001610 ext4_count_free_clusters(sb)));
Theodore Ts'o92b97812012-03-19 23:41:49 -04001611 ext4_msg(sb, KERN_CRIT, "Free/Dirty block details");
1612 ext4_msg(sb, KERN_CRIT, "free_blocks=%lld",
Lukas Czernerf78ee70d2013-04-03 23:33:30 -04001613 (long long) EXT4_C2B(EXT4_SB(sb),
Theodore Ts'o57042652011-09-09 18:56:51 -04001614 percpu_counter_sum(&sbi->s_freeclusters_counter)));
Theodore Ts'o92b97812012-03-19 23:41:49 -04001615 ext4_msg(sb, KERN_CRIT, "dirty_blocks=%lld",
Lukas Czernerf78ee70d2013-04-03 23:33:30 -04001616 (long long) EXT4_C2B(EXT4_SB(sb),
Aditya Kali7b415bf2011-09-09 19:04:51 -04001617 percpu_counter_sum(&sbi->s_dirtyclusters_counter)));
Theodore Ts'o92b97812012-03-19 23:41:49 -04001618 ext4_msg(sb, KERN_CRIT, "Block reservation details");
1619 ext4_msg(sb, KERN_CRIT, "i_reserved_data_blocks=%u",
Lukas Czernerf78ee70d2013-04-03 23:33:30 -04001620 ei->i_reserved_data_blocks);
Aneesh Kumar K.Vdf222912008-09-08 23:05:34 -04001621 return;
1622}
1623
Aneesh Kumar K.Vc364b222009-06-14 17:57:10 -04001624static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh)
Aneesh Kumar K.V29fa89d2009-05-12 16:30:27 -04001625{
Aneesh Kumar K.Vc364b222009-06-14 17:57:10 -04001626 return (buffer_delay(bh) || buffer_unwritten(bh)) && buffer_dirty(bh);
Aneesh Kumar K.V29fa89d2009-05-12 16:30:27 -04001627}
1628
Alex Tomas64769242008-07-11 19:27:31 -04001629/*
Eric Whitney0b02f4c2018-10-01 14:19:37 -04001630 * ext4_insert_delayed_block - adds a delayed block to the extents status
1631 * tree, incrementing the reserved cluster/block
1632 * count or making a pending reservation
1633 * where needed
1634 *
1635 * @inode - file containing the newly added block
1636 * @lblk - logical block to be added
1637 *
1638 * Returns 0 on success, negative error code on failure.
1639 */
1640static int ext4_insert_delayed_block(struct inode *inode, ext4_lblk_t lblk)
1641{
1642 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
1643 int ret;
1644 bool allocated = false;
1645
1646 /*
1647 * If the cluster containing lblk is shared with a delayed,
1648 * written, or unwritten extent in a bigalloc file system, it's
1649 * already been accounted for and does not need to be reserved.
1650 * A pending reservation must be made for the cluster if it's
1651 * shared with a written or unwritten extent and doesn't already
1652 * have one. Written and unwritten extents can be purged from the
1653 * extents status tree if the system is under memory pressure, so
1654 * it's necessary to examine the extent tree if a search of the
1655 * extents status tree doesn't get a match.
1656 */
1657 if (sbi->s_cluster_ratio == 1) {
1658 ret = ext4_da_reserve_space(inode);
1659 if (ret != 0) /* ENOSPC */
1660 goto errout;
1661 } else { /* bigalloc */
1662 if (!ext4_es_scan_clu(inode, &ext4_es_is_delonly, lblk)) {
1663 if (!ext4_es_scan_clu(inode,
1664 &ext4_es_is_mapped, lblk)) {
1665 ret = ext4_clu_mapped(inode,
1666 EXT4_B2C(sbi, lblk));
1667 if (ret < 0)
1668 goto errout;
1669 if (ret == 0) {
1670 ret = ext4_da_reserve_space(inode);
1671 if (ret != 0) /* ENOSPC */
1672 goto errout;
1673 } else {
1674 allocated = true;
1675 }
1676 } else {
1677 allocated = true;
1678 }
1679 }
1680 }
1681
1682 ret = ext4_es_insert_delayed_block(inode, lblk, allocated);
1683
1684errout:
1685 return ret;
1686}
1687
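/*
 * Editor's note: the bigalloc branch above reserves per cluster, not per
 * block. A small sketch of the block-to-cluster arithmetic EXT4_B2C()
 * performs; the log2 ratio of 4 is an assumed example (16 blocks per
 * cluster), the real value comes from the superblock.
 */
#if 0
	unsigned int log_ratio = 4;		/* 16 blocks per cluster */
	unsigned int clu_a = 17 >> log_ratio;	/* 1 */
	unsigned int clu_b = 31 >> log_ratio;	/* 1: same cluster, shares the
						 * reservation already made */
	unsigned int clu_c = 32 >> log_ratio;	/* 2: new cluster, needs its own
						 * reservation or a pending one */
#endif
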
1688/*
Aditya Kali5356f2612011-09-09 19:20:51 -04001689 * This function grabs code from the very beginning of
1690 * ext4_map_blocks, but assumes that the caller is in the delayed write
1691 * path. This function looks up the requested blocks and sets the
1692 * buffer delay bit under the protection of i_data_sem.
1693 */
1694static int ext4_da_map_blocks(struct inode *inode, sector_t iblock,
1695 struct ext4_map_blocks *map,
1696 struct buffer_head *bh)
1697{
Zheng Liud100eef2013-02-18 00:29:59 -05001698 struct extent_status es;
Aditya Kali5356f2612011-09-09 19:20:51 -04001699 int retval;
1700 sector_t invalid_block = ~((sector_t) 0xffff);
Dmitry Monakhov921f2662013-03-10 21:01:03 -04001701#ifdef ES_AGGRESSIVE_TEST
1702 struct ext4_map_blocks orig_map;
1703
1704 memcpy(&orig_map, map, sizeof(*map));
1705#endif
Aditya Kali5356f2612011-09-09 19:20:51 -04001706
1707 if (invalid_block < ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es))
1708 invalid_block = ~0;
1709
1710 map->m_flags = 0;
Ritesh Harjani70aa1552020-05-10 11:54:55 +05301711 ext_debug(inode, "max_blocks %u, logical block %lu\n", map->m_len,
Aditya Kali5356f2612011-09-09 19:20:51 -04001712 (unsigned long) map->m_lblk);
Zheng Liud100eef2013-02-18 00:29:59 -05001713
1714 /* Lookup extent status tree firstly */
Theodore Ts'obb5835e2019-08-11 16:32:41 -04001715 if (ext4_es_lookup_extent(inode, iblock, NULL, &es)) {
Zheng Liud100eef2013-02-18 00:29:59 -05001716 if (ext4_es_is_hole(&es)) {
1717 retval = 0;
Lukas Czernerc8b459f2014-05-12 12:55:07 -04001718 down_read(&EXT4_I(inode)->i_data_sem);
Zheng Liud100eef2013-02-18 00:29:59 -05001719 goto add_delayed;
1720 }
1721
1722 /*
1723 * Delayed extent could be allocated by fallocate.
1724 * So we need to check it.
1725 */
1726 if (ext4_es_is_delayed(&es) && !ext4_es_is_unwritten(&es)) {
1727 map_bh(bh, inode->i_sb, invalid_block);
1728 set_buffer_new(bh);
1729 set_buffer_delay(bh);
1730 return 0;
1731 }
1732
1733 map->m_pblk = ext4_es_pblock(&es) + iblock - es.es_lblk;
1734 retval = es.es_len - (iblock - es.es_lblk);
1735 if (retval > map->m_len)
1736 retval = map->m_len;
1737 map->m_len = retval;
1738 if (ext4_es_is_written(&es))
1739 map->m_flags |= EXT4_MAP_MAPPED;
1740 else if (ext4_es_is_unwritten(&es))
1741 map->m_flags |= EXT4_MAP_UNWRITTEN;
1742 else
Arnd Bergmann1e83bc82019-04-07 12:24:43 -04001743 BUG();
Zheng Liud100eef2013-02-18 00:29:59 -05001744
Dmitry Monakhov921f2662013-03-10 21:01:03 -04001745#ifdef ES_AGGRESSIVE_TEST
1746 ext4_map_blocks_es_recheck(NULL, inode, map, &orig_map, 0);
1747#endif
Zheng Liud100eef2013-02-18 00:29:59 -05001748 return retval;
1749 }
1750
Aditya Kali5356f2612011-09-09 19:20:51 -04001751 /*
1752 * Try to see if we can get the block without requesting a new
1753 * file system block.
1754 */
Lukas Czernerc8b459f2014-05-12 12:55:07 -04001755 down_read(&EXT4_I(inode)->i_data_sem);
Jan Karacbd75842014-11-25 11:41:49 -05001756 if (ext4_has_inline_data(inode))
Tao Ma9c3569b2012-12-10 14:05:57 -05001757 retval = 0;
Jan Karacbd75842014-11-25 11:41:49 -05001758 else if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
Zheng Liu2f8e0a72014-11-25 11:44:37 -05001759 retval = ext4_ext_map_blocks(NULL, inode, map, 0);
Aditya Kali5356f2612011-09-09 19:20:51 -04001760 else
Zheng Liu2f8e0a72014-11-25 11:44:37 -05001761 retval = ext4_ind_map_blocks(NULL, inode, map, 0);
Aditya Kali5356f2612011-09-09 19:20:51 -04001762
Zheng Liud100eef2013-02-18 00:29:59 -05001763add_delayed:
Aditya Kali5356f2612011-09-09 19:20:51 -04001764 if (retval == 0) {
Zheng Liuf7fec032013-02-18 00:28:47 -05001765 int ret;
Eric Whitneyad431022018-10-01 14:10:39 -04001766
Aditya Kali5356f2612011-09-09 19:20:51 -04001767 /*
1768 * XXX: __block_prepare_write() unmaps passed block,
1769 * is it OK?
1770 */
Aditya Kali5356f2612011-09-09 19:20:51 -04001771
Eric Whitney0b02f4c2018-10-01 14:19:37 -04001772 ret = ext4_insert_delayed_block(inode, map->m_lblk);
1773 if (ret != 0) {
Zheng Liuf7fec032013-02-18 00:28:47 -05001774 retval = ret;
Zheng Liu51865fd2012-11-08 21:57:32 -05001775 goto out_unlock;
Zheng Liuf7fec032013-02-18 00:28:47 -05001776 }
Zheng Liu51865fd2012-11-08 21:57:32 -05001777
Aditya Kali5356f2612011-09-09 19:20:51 -04001778 map_bh(bh, inode->i_sb, invalid_block);
1779 set_buffer_new(bh);
1780 set_buffer_delay(bh);
Zheng Liuf7fec032013-02-18 00:28:47 -05001781 } else if (retval > 0) {
1782 int ret;
Theodore Ts'o3be78c72013-08-16 21:22:41 -04001783 unsigned int status;
Zheng Liuf7fec032013-02-18 00:28:47 -05001784
Zheng Liu44fb851d2013-07-29 12:51:42 -04001785 if (unlikely(retval != map->m_len)) {
1786 ext4_warning(inode->i_sb,
1787 "ES len assertion failed for inode "
1788 "%lu: retval %d != map->m_len %d",
1789 inode->i_ino, retval, map->m_len);
1790 WARN_ON(1);
Dmitry Monakhov921f2662013-03-10 21:01:03 -04001791 }
Dmitry Monakhov921f2662013-03-10 21:01:03 -04001792
Zheng Liuf7fec032013-02-18 00:28:47 -05001793 status = map->m_flags & EXT4_MAP_UNWRITTEN ?
1794 EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
1795 ret = ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
1796 map->m_pblk, status);
1797 if (ret != 0)
1798 retval = ret;
Aditya Kali5356f2612011-09-09 19:20:51 -04001799 }
1800
1801out_unlock:
1802 up_read((&EXT4_I(inode)->i_data_sem));
1803
1804 return retval;
1805}
1806
1807/*
Seunghun Leed91bd2c2014-09-01 22:15:30 -04001808 * This is a special get_block_t callback which is used by
Theodore Ts'ob920c752009-05-14 00:54:29 -04001809 * ext4_da_write_begin(). It will either return a mapped block or
1810 * reserve space for a single block.
Aneesh Kumar K.V29fa89d2009-05-12 16:30:27 -04001811 *
1812 * For delayed buffer_head we have BH_Mapped, BH_New, BH_Delay set.
1813 * We also have b_blocknr = -1 and b_bdev initialized properly
1814 *
1815 * For unwritten buffer_head we have BH_Mapped, BH_New, BH_Unwritten set.
1816 * We also have b_blocknr = physicalblock mapping unwritten extent and b_bdev
1817 * initialized properly.
Alex Tomas64769242008-07-11 19:27:31 -04001818 */
Tao Ma9c3569b2012-12-10 14:05:57 -05001819int ext4_da_get_block_prep(struct inode *inode, sector_t iblock,
1820 struct buffer_head *bh, int create)
Alex Tomas64769242008-07-11 19:27:31 -04001821{
Theodore Ts'o2ed88682010-05-16 20:00:00 -04001822 struct ext4_map_blocks map;
Alex Tomas64769242008-07-11 19:27:31 -04001823 int ret = 0;
1824
1825 BUG_ON(create == 0);
Theodore Ts'o2ed88682010-05-16 20:00:00 -04001826 BUG_ON(bh->b_size != inode->i_sb->s_blocksize);
1827
1828 map.m_lblk = iblock;
1829 map.m_len = 1;
Alex Tomas64769242008-07-11 19:27:31 -04001830
1831 /*
1832 * First, we need to know whether the block is allocated already;
1833 * preallocated blocks are unmapped but should be treated
1834 * the same as allocated blocks.
1835 */
Aditya Kali5356f2612011-09-09 19:20:51 -04001836 ret = ext4_da_map_blocks(inode, iblock, &map, bh);
1837 if (ret <= 0)
Theodore Ts'o2ed88682010-05-16 20:00:00 -04001838 return ret;
Alex Tomas64769242008-07-11 19:27:31 -04001839
Theodore Ts'o2ed88682010-05-16 20:00:00 -04001840 map_bh(bh, inode->i_sb, map.m_pblk);
Jan Karaed8ad832016-02-19 00:18:25 -05001841 ext4_update_bh_state(bh, map.m_flags);
Theodore Ts'o2ed88682010-05-16 20:00:00 -04001842
1843 if (buffer_unwritten(bh)) {
1844 /* A delayed write to unwritten bh should be marked
1845 * new and mapped. Mapped ensures that we don't do
1846 * get_block multiple times when we write to the same
1847 * offset and new ensures that we do proper zero out
1848 * for partial write.
1849 */
1850 set_buffer_new(bh);
Theodore Ts'oc8205632011-04-10 22:30:07 -04001851 set_buffer_mapped(bh);
Theodore Ts'o2ed88682010-05-16 20:00:00 -04001852 }
1853 return 0;
Alex Tomas64769242008-07-11 19:27:31 -04001854}
Mingming Cao61628a32008-07-11 19:27:31 -04001855
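/*
 * Editor's sketch of the contract documented above, as a buffer-prepare
 * path such as __block_write_begin() would observe it; illustrative
 * only, error handling elided.
 */
#if 0
	err = ext4_da_get_block_prep(inode, iblock, bh, 1);
	if (!err && buffer_delay(bh)) {
		/*
		 * bh is now new + mapped + delay: no block was allocated,
		 * only an in-memory reservation was taken; writeback does
		 * the real allocation later.
		 */
	}
#endif
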
Aneesh Kumar K.V62e086b2009-06-14 17:59:34 -04001856static int bget_one(handle_t *handle, struct buffer_head *bh)
1857{
1858 get_bh(bh);
1859 return 0;
1860}
1861
1862static int bput_one(handle_t *handle, struct buffer_head *bh)
1863{
1864 put_bh(bh);
1865 return 0;
1866}
1867
1868static int __ext4_journalled_writepage(struct page *page,
Aneesh Kumar K.V62e086b2009-06-14 17:59:34 -04001869 unsigned int len)
1870{
1871 struct address_space *mapping = page->mapping;
1872 struct inode *inode = mapping->host;
Tao Ma3fdcfb62012-12-10 14:05:57 -05001873 struct buffer_head *page_bufs = NULL;
Aneesh Kumar K.V62e086b2009-06-14 17:59:34 -04001874 handle_t *handle = NULL;
Tao Ma3fdcfb62012-12-10 14:05:57 -05001875 int ret = 0, err = 0;
1876 int inline_data = ext4_has_inline_data(inode);
1877 struct buffer_head *inode_bh = NULL;
Aneesh Kumar K.V62e086b2009-06-14 17:59:34 -04001878
Theodore Ts'ocb20d512010-10-27 21:30:09 -04001879 ClearPageChecked(page);
Tao Ma3fdcfb62012-12-10 14:05:57 -05001880
1881 if (inline_data) {
1882 BUG_ON(page->index != 0);
1883 BUG_ON(len > ext4_get_max_inline_size(inode));
1884 inode_bh = ext4_journalled_write_inline_data(inode, len, page);
1885 if (inode_bh == NULL)
1886 goto out;
1887 } else {
1888 page_bufs = page_buffers(page);
1889 if (!page_bufs) {
1890 BUG();
1891 goto out;
1892 }
1893 ext4_walk_page_buffers(handle, page_bufs, 0, len,
1894 NULL, bget_one);
1895 }
Theodore Ts'obdf96832015-06-12 23:45:33 -04001896 /*
1897 * We need to release the page lock before we start the
1898 * journal, so grab a reference so the page won't disappear
1899 * out from under us.
1900 */
1901 get_page(page);
Aneesh Kumar K.V62e086b2009-06-14 17:59:34 -04001902 unlock_page(page);
1903
Theodore Ts'o9924a922013-02-08 21:59:22 -05001904 handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE,
1905 ext4_writepage_trans_blocks(inode));
Aneesh Kumar K.V62e086b2009-06-14 17:59:34 -04001906 if (IS_ERR(handle)) {
1907 ret = PTR_ERR(handle);
Theodore Ts'obdf96832015-06-12 23:45:33 -04001908 put_page(page);
1909 goto out_no_pagelock;
1910 }
1911 BUG_ON(!ext4_handle_valid(handle));
1912
1913 lock_page(page);
1914 put_page(page);
1915 if (page->mapping != mapping) {
1916 /* The page got truncated from under us */
1917 ext4_journal_stop(handle);
1918 ret = 0;
Aneesh Kumar K.V62e086b2009-06-14 17:59:34 -04001919 goto out;
1920 }
1921
Tao Ma3fdcfb62012-12-10 14:05:57 -05001922 if (inline_data) {
Theodore Ts'o362eca72018-07-10 01:07:43 -04001923 ret = ext4_mark_inode_dirty(handle, inode);
Tao Ma3fdcfb62012-12-10 14:05:57 -05001924 } else {
1925 ret = ext4_walk_page_buffers(handle, page_bufs, 0, len, NULL,
1926 do_journal_get_write_access);
1927
1928 err = ext4_walk_page_buffers(handle, page_bufs, 0, len, NULL,
1929 write_end_fn);
1930 }
Aneesh Kumar K.V62e086b2009-06-14 17:59:34 -04001931 if (ret == 0)
1932 ret = err;
Jan Karab5b18162020-10-27 14:27:51 +01001933 err = ext4_jbd2_inode_add_write(handle, inode, page_offset(page), len);
Mauricio Faria de Oliveiraafb585a2020-10-05 21:48:41 -03001934 if (ret == 0)
1935 ret = err;
Jan Kara2d859db2011-07-26 09:07:11 -04001936 EXT4_I(inode)->i_datasync_tid = handle->h_transaction->t_tid;
Aneesh Kumar K.V62e086b2009-06-14 17:59:34 -04001937 err = ext4_journal_stop(handle);
1938 if (!ret)
1939 ret = err;
1940
Theodore Ts'o19f5fb72010-01-24 14:34:07 -05001941 ext4_set_inode_state(inode, EXT4_STATE_JDATA);
Aneesh Kumar K.V62e086b2009-06-14 17:59:34 -04001942out:
Theodore Ts'obdf96832015-06-12 23:45:33 -04001943 unlock_page(page);
1944out_no_pagelock:
Zhaolong Zhangc915fb82021-03-02 17:42:31 +08001945 if (!inline_data && page_bufs)
1946 ext4_walk_page_buffers(NULL, page_bufs, 0, len,
1947 NULL, bput_one);
Tao Ma3fdcfb62012-12-10 14:05:57 -05001948 brelse(inode_bh);
Aneesh Kumar K.V62e086b2009-06-14 17:59:34 -04001949 return ret;
1950}
1951
Mingming Cao61628a32008-07-11 19:27:31 -04001952/*
Aneesh Kumar K.V43ce1d22009-06-14 17:58:45 -04001953 * Note that we don't need to start a transaction unless we're journaling data
1954 * because we should have holes filled from ext4_page_mkwrite(). We don't even
1955 * need to add the inode to the transaction's list in ordered mode because if
1956 * we are writing back data added by write(), the inode is already there and if
Lucas De Marchi25985ed2011-03-30 22:57:33 -03001957 * we are writing back data modified via mmap(), no one guarantees in which
Aneesh Kumar K.V43ce1d22009-06-14 17:58:45 -04001958 * transaction the data will hit the disk. In case we are journaling data, we
1959 * cannot start a transaction directly because transaction start ranks above the
1960 * page lock, so we have to do some magic.
1961 *
Theodore Ts'ob920c752009-05-14 00:54:29 -04001962 * This function can get called via...
Theodore Ts'o20970ba2013-06-06 14:00:46 -04001963 * - ext4_writepages after taking page lock (have journal handle)
Theodore Ts'ob920c752009-05-14 00:54:29 -04001964 * - journal_submit_inode_data_buffers (no journal handle)
Artem Bityutskiyf6463b02012-07-25 18:12:04 +03001965 * - shrink_page_list via the kswapd/direct reclaim (no journal handle)
Theodore Ts'ob920c752009-05-14 00:54:29 -04001966 * - grab_page_cache when doing write_begin (have journal handle)
Aneesh Kumar K.V43ce1d22009-06-14 17:58:45 -04001967 *
1968 * We don't do any block allocation in this function. If we have page with
1969 * multiple blocks we need to write those buffer_heads that are mapped. This
1970 * is important for mmaped based write. So if we do with blocksize 1K
1971 * truncate(f, 1024);
1972 * a = mmap(f, 0, 4096);
1973 * a[0] = 'a';
1974 * truncate(f, 4096);
1975 * then the first buffer_head in the page is mapped via the page_mkwrite callback,
Paul Bolle90802ed2011-12-05 13:00:34 +01001976 * but the other buffer_heads are unmapped yet dirty (dirtied via
Aneesh Kumar K.V43ce1d22009-06-14 17:58:45 -04001977 * do_wp_page). So writepage should write the first block. If we modify
1978 * the mmap area beyond 1024 we will again get a page_fault and the
1979 * page_mkwrite callback will do the block allocation and mark the
1980 * buffer_heads mapped.
1981 *
1982 * We redirty the page if we have any buffer_heads that is either delay or
1983 * unwritten in the page.
1984 *
1985 * We can get recursively called as shown below.
1986 *
1987 * ext4_writepage() -> kmalloc() -> __alloc_pages() -> page_launder() ->
1988 * ext4_writepage()
1989 *
1990 * But since we don't do any block allocation we should not deadlock.
1991 * The page also has the dirty flag cleared so we don't get recursive page_lock.
Mingming Cao61628a32008-07-11 19:27:31 -04001992 */
Aneesh Kumar K.V43ce1d22009-06-14 17:58:45 -04001993static int ext4_writepage(struct page *page,
Aneesh Kumar K.V62e086b2009-06-14 17:59:34 -04001994 struct writeback_control *wbc)
Alex Tomas64769242008-07-11 19:27:31 -04001995{
Jan Karaf8bec372013-01-28 12:55:08 -05001996 int ret = 0;
Mingming Cao61628a32008-07-11 19:27:31 -04001997 loff_t size;
Theodore Ts'o498e5f22008-11-05 00:14:04 -05001998 unsigned int len;
Jiaying Zhang744692d2010-03-04 16:14:02 -05001999 struct buffer_head *page_bufs = NULL;
Mingming Cao61628a32008-07-11 19:27:31 -04002000 struct inode *inode = page->mapping->host;
Jan Kara36ade452013-01-28 09:30:52 -05002001 struct ext4_io_submit io_submit;
Namjae Jeon1c8349a2014-05-12 08:12:25 -04002002 bool keep_towrite = false;
Alex Tomas64769242008-07-11 19:27:31 -04002003
Theodore Ts'o0db1ff22017-02-05 01:28:48 -05002004 if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb)))) {
yangerkunc2a559b2020-02-26 12:10:02 +08002005 inode->i_mapping->a_ops->invalidatepage(page, 0, PAGE_SIZE);
Theodore Ts'o0db1ff22017-02-05 01:28:48 -05002006 unlock_page(page);
2007 return -EIO;
2008 }
2009
Lukas Czernera9c667f2011-06-06 09:51:52 -04002010 trace_ext4_writepage(page);
Aneesh Kumar K.Vf0e6c982008-07-11 19:27:31 -04002011 size = i_size_read(inode);
Eric Biggersc93d8f82019-07-22 09:26:24 -07002012 if (page->index == size >> PAGE_SHIFT &&
2013 !ext4_verity_in_progress(inode))
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002014 len = size & ~PAGE_MASK;
Aneesh Kumar K.Vf0e6c982008-07-11 19:27:31 -04002015 else
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002016 len = PAGE_SIZE;
Alex Tomas64769242008-07-11 19:27:31 -04002017
Theodore Ts'oa42afc52010-10-27 21:30:09 -04002018 page_bufs = page_buffers(page);
Aneesh Kumar K.Vc364b222009-06-14 17:57:10 -04002019 /*
Jan Karafe386132013-01-28 21:06:42 -05002020 * We cannot do block allocation or other extent handling in this
2021 * function. If there are buffers needing that, we have to redirty
2022 * the page. But we may reach here when we do a journal commit via
2023 * journal_submit_inode_data_buffers() and in that case we must write
2024 * allocated buffers to achieve data=ordered mode guarantees.
Theodore Ts'occcd1472015-10-03 10:49:23 -04002025 *
2026 * Also, if there is only one buffer per page (the fs block
2027 * size == the page size), if one buffer needs block
2028 * allocation or needs to modify the extent tree to clear the
2029 * unwritten flag, we know that the page can't be written at
2030 * all, so we might as well refuse the write immediately.
2031 * Unfortunately if the block size != page size, we can't as
2032 * easily detect this case using ext4_walk_page_buffers(), but
2033 * for the extremely common case, this is an optimization that
2034 * skips a useless round trip through ext4_bio_write_page().
Aneesh Kumar K.Vcd1aac32008-07-11 19:27:31 -04002035 */
Tao Maf19d5872012-12-10 14:05:51 -05002036 if (ext4_walk_page_buffers(NULL, page_bufs, 0, len, NULL,
2037 ext4_bh_delay_or_unwritten)) {
Jan Karaf8bec372013-01-28 12:55:08 -05002038 redirty_page_for_writepage(wbc, page);
Theodore Ts'occcd1472015-10-03 10:49:23 -04002039 if ((current->flags & PF_MEMALLOC) ||
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002040 (inode->i_sb->s_blocksize == PAGE_SIZE)) {
Jan Karafe386132013-01-28 21:06:42 -05002041 /*
2042 * For memory cleaning there's no point in writing only
2043 * some buffers. So just bail out. Warn if we came here
2044 * from direct reclaim.
2045 */
2046 WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD))
2047 == PF_MEMALLOC);
Aneesh Kumar K.Vf0e6c982008-07-11 19:27:31 -04002048 unlock_page(page);
2049 return 0;
2050 }
Namjae Jeon1c8349a2014-05-12 08:12:25 -04002051 keep_towrite = true;
Theodore Ts'oa42afc52010-10-27 21:30:09 -04002052 }
Alex Tomas64769242008-07-11 19:27:31 -04002053
Theodore Ts'ocb20d512010-10-27 21:30:09 -04002054 if (PageChecked(page) && ext4_should_journal_data(inode))
Aneesh Kumar K.V43ce1d22009-06-14 17:58:45 -04002055 /*
2056 * It's mmapped pagecache. Add buffers and journal it. There
2057 * doesn't seem to be much point in redirtying the page here.
2058 */
Wu Fengguang3f0ca302009-11-24 11:15:44 -05002059 return __ext4_journalled_writepage(page, len);
Aneesh Kumar K.V43ce1d22009-06-14 17:58:45 -04002060
Jan Kara97a851e2013-06-04 11:58:58 -04002061 ext4_io_submit_init(&io_submit, wbc);
2062 io_submit.io_end = ext4_init_io_end(inode, GFP_NOFS);
2063 if (!io_submit.io_end) {
2064 redirty_page_for_writepage(wbc, page);
2065 unlock_page(page);
2066 return -ENOMEM;
2067 }
Lei Chenbe993932020-12-11 14:54:24 +08002068 ret = ext4_bio_write_page(&io_submit, page, len, keep_towrite);
Jan Kara36ade452013-01-28 09:30:52 -05002069 ext4_io_submit(&io_submit);
Jan Kara97a851e2013-06-04 11:58:58 -04002070 /* Drop io_end reference we got from init */
2071 ext4_put_io_end_defer(io_submit.io_end);
Alex Tomas64769242008-07-11 19:27:31 -04002072 return ret;
2073}
2074
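/*
 * Editor's sketch (userspace, not part of this file): the partially
 * mapped page scenario described in the comment above ext4_writepage()
 * can be reproduced roughly like this on a 1K-block ext4 file system;
 * file name and sizes are illustrative.
 */
#if 0
	int fd = open("f", O_RDWR | O_CREAT, 0644);
	char *a;

	ftruncate(fd, 1024);		/* file covers one 1K block */
	a = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	a[0] = 'a';			/* page_mkwrite maps block 0 only */
	ftruncate(fd, 4096);		/* blocks 1-3: dirty but unmapped */
	/*
	 * writepage must now write block 0; touching the area beyond
	 * 1024 would fault again and page_mkwrite would map the rest.
	 */
#endif
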
Jan Kara5f1132b2013-08-17 10:02:33 -04002075static int mpage_submit_page(struct mpage_da_data *mpd, struct page *page)
2076{
2077 int len;
Jan Karaa056bda2017-05-26 17:45:45 -04002078 loff_t size;
Jan Kara5f1132b2013-08-17 10:02:33 -04002079 int err;
2080
2081 BUG_ON(page->index != mpd->first_page);
Jan Karaa056bda2017-05-26 17:45:45 -04002082 clear_page_dirty_for_io(page);
2083 /*
2084 * We have to be very careful here! Nothing protects writeback path
2085 * against i_size changes and the page can be writeably mapped into
2086 * page tables. So an application can be growing i_size and writing
2087 * data through mmap while writeback runs. clear_page_dirty_for_io()
2088 * write-protects our page in page tables and the page cannot get
2089 * written to again until we release page lock. So only after
2090 * clear_page_dirty_for_io() we are safe to sample i_size for
2091 * ext4_bio_write_page() to zero-out tail of the written page. We rely
2092 * on the barrier provided by TestClearPageDirty in
2093 * clear_page_dirty_for_io() to make sure i_size is really sampled only
2094 * after page tables are updated.
2095 */
2096 size = i_size_read(mpd->inode);
Eric Biggersc93d8f82019-07-22 09:26:24 -07002097 if (page->index == size >> PAGE_SHIFT &&
2098 !ext4_verity_in_progress(mpd->inode))
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002099 len = size & ~PAGE_MASK;
Jan Kara5f1132b2013-08-17 10:02:33 -04002100 else
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002101 len = PAGE_SIZE;
Lei Chenbe993932020-12-11 14:54:24 +08002102 err = ext4_bio_write_page(&mpd->io_submit, page, len, false);
Jan Kara5f1132b2013-08-17 10:02:33 -04002103 if (!err)
2104 mpd->wbc->nr_to_write--;
2105 mpd->first_page++;
2106
2107 return err;
2108}
2109
Ritesh Harjani6db07462020-05-10 11:54:51 +05302110#define BH_FLAGS (BIT(BH_Unwritten) | BIT(BH_Delay))
Jan Kara4e7ea812013-06-04 13:17:40 -04002111
Mingming Cao61628a32008-07-11 19:27:31 -04002112/*
Jan Karafffb2732013-06-04 13:01:11 -04002113 * mballoc gives us at most this number of blocks...
2114 * XXX: That seems to be only a limitation of ext4_mb_normalize_request().
Anatol Pomozov70261f52013-08-28 14:40:12 -04002115 * The rest of mballoc seems to handle chunks up to full group size.
Mingming Cao61628a32008-07-11 19:27:31 -04002116 */
Jan Karafffb2732013-06-04 13:01:11 -04002117#define MAX_WRITEPAGES_EXTENT_LEN 2048
Mingming Cao525f4ed2008-08-19 22:15:58 -04002118
Jan Karafffb2732013-06-04 13:01:11 -04002119/*
Jan Kara4e7ea812013-06-04 13:17:40 -04002120 * mpage_add_bh_to_extent - try to add bh to extent of blocks to map
2121 *
2122 * @mpd - extent of blocks
2123 * @lblk - logical number of the block in the file
Jan Kara09930042013-08-17 09:57:56 -04002124 * @bh - buffer head we want to add to the extent
Jan Kara4e7ea812013-06-04 13:17:40 -04002125 *
Jan Kara09930042013-08-17 09:57:56 -04002126 * The function is used to collect contiguous blocks in the same state. If the
2127 * buffer doesn't require mapping for writeback and we haven't started the
2128 * extent of buffers to map yet, the function returns 'true' immediately - the
2129 * caller can write the buffer right away. Otherwise the function returns true
2130 * if the block has been added to the extent, false if the block couldn't be
2131 * added.
Jan Kara4e7ea812013-06-04 13:17:40 -04002132 */
Jan Kara09930042013-08-17 09:57:56 -04002133static bool mpage_add_bh_to_extent(struct mpage_da_data *mpd, ext4_lblk_t lblk,
2134 struct buffer_head *bh)
Jan Kara4e7ea812013-06-04 13:17:40 -04002135{
2136 struct ext4_map_blocks *map = &mpd->map;
2137
Jan Kara09930042013-08-17 09:57:56 -04002138 /* Buffer that doesn't need mapping for writeback? */
2139 if (!buffer_dirty(bh) || !buffer_mapped(bh) ||
2140 (!buffer_delay(bh) && !buffer_unwritten(bh))) {
2141 /* So far no extent to map => we write the buffer right away */
2142 if (map->m_len == 0)
2143 return true;
2144 return false;
2145 }
Jan Kara4e7ea812013-06-04 13:17:40 -04002146
2147 /* First block in the extent? */
2148 if (map->m_len == 0) {
Jan Karadddbd6a2017-04-30 18:29:10 -04002149 /* We cannot map unless handle is started... */
2150 if (!mpd->do_map)
2151 return false;
Jan Kara4e7ea812013-06-04 13:17:40 -04002152 map->m_lblk = lblk;
2153 map->m_len = 1;
Jan Kara09930042013-08-17 09:57:56 -04002154 map->m_flags = bh->b_state & BH_FLAGS;
2155 return true;
Jan Kara4e7ea812013-06-04 13:17:40 -04002156 }
2157
Jan Kara09930042013-08-17 09:57:56 -04002158 /* Don't go larger than mballoc is willing to allocate */
2159 if (map->m_len >= MAX_WRITEPAGES_EXTENT_LEN)
2160 return false;
2161
Jan Kara4e7ea812013-06-04 13:17:40 -04002162 /* Can we merge the block to our big extent? */
2163 if (lblk == map->m_lblk + map->m_len &&
Jan Kara09930042013-08-17 09:57:56 -04002164 (bh->b_state & BH_FLAGS) == map->m_flags) {
Jan Kara4e7ea812013-06-04 13:17:40 -04002165 map->m_len++;
Jan Kara09930042013-08-17 09:57:56 -04002166 return true;
Jan Kara4e7ea812013-06-04 13:17:40 -04002167 }
Jan Kara09930042013-08-17 09:57:56 -04002168 return false;
Jan Kara4e7ea812013-06-04 13:17:40 -04002169}
2170
Jan Kara5f1132b2013-08-17 10:02:33 -04002171/*
2172 * mpage_process_page_bufs - submit page buffers for IO or add them to extent
2173 *
2174 * @mpd - extent of blocks for mapping
2175 * @head - the first buffer in the page
2176 * @bh - buffer we should start processing from
2177 * @lblk - logical number of the block in the file corresponding to @bh
2178 *
2179 * Walk through page buffers from @bh up to @head (exclusive) and either submit
2180 * the page for IO if all buffers in this page were mapped and there's no
2181 * accumulated extent of buffers to map or add buffers in the page to the
2182 * extent of buffers to map. The function returns 1 if the caller can continue
2183 * by processing the next page, 0 if it should stop adding buffers to the
2184 * extent to map because we cannot extend it anymore. It can also return a
2185 * value < 0 in case of an error during IO submission.
2186 */
2187static int mpage_process_page_bufs(struct mpage_da_data *mpd,
2188 struct buffer_head *head,
2189 struct buffer_head *bh,
2190 ext4_lblk_t lblk)
Jan Kara4e7ea812013-06-04 13:17:40 -04002191{
2192 struct inode *inode = mpd->inode;
Jan Kara5f1132b2013-08-17 10:02:33 -04002193 int err;
Fabian Frederick93407472017-02-27 14:28:32 -08002194 ext4_lblk_t blocks = (i_size_read(inode) + i_blocksize(inode) - 1)
Jan Kara4e7ea812013-06-04 13:17:40 -04002195 >> inode->i_blkbits;
2196
Eric Biggersc93d8f82019-07-22 09:26:24 -07002197 if (ext4_verity_in_progress(inode))
2198 blocks = EXT_MAX_BLOCKS;
2199
Jan Kara4e7ea812013-06-04 13:17:40 -04002200 do {
2201 BUG_ON(buffer_locked(bh));
2202
Jan Kara09930042013-08-17 09:57:56 -04002203 if (lblk >= blocks || !mpage_add_bh_to_extent(mpd, lblk, bh)) {
Jan Kara4e7ea812013-06-04 13:17:40 -04002204 /* Found extent to map? */
2205 if (mpd->map.m_len)
Jan Kara5f1132b2013-08-17 10:02:33 -04002206 return 0;
Jan Karadddbd6a2017-04-30 18:29:10 -04002207 /* Buffer needs mapping and handle is not started? */
2208 if (!mpd->do_map)
2209 return 0;
Jan Kara09930042013-08-17 09:57:56 -04002210 /* Everything mapped so far and we hit EOF */
Jan Kara5f1132b2013-08-17 10:02:33 -04002211 break;
Jan Kara4e7ea812013-06-04 13:17:40 -04002212 }
Jan Kara4e7ea812013-06-04 13:17:40 -04002213 } while (lblk++, (bh = bh->b_this_page) != head);
Jan Kara5f1132b2013-08-17 10:02:33 -04002214 /* So far everything mapped? Submit the page for IO. */
2215 if (mpd->map.m_len == 0) {
2216 err = mpage_submit_page(mpd, head->b_page);
2217 if (err < 0)
2218 return err;
2219 }
Jan Kara6b8ed622020-05-25 10:12:15 +02002220 if (lblk >= blocks) {
2221 mpd->scanned_until_end = 1;
2222 return 0;
2223 }
2224 return 1;
Jan Kara4e7ea812013-06-04 13:17:40 -04002225}
2226
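/*
 * Editor's sketch of how a caller consumes the return convention
 * documented above (1 = continue with the next page, 0 = stop adding
 * buffers, < 0 = error from IO submission); the labels are hypothetical
 * and the fragment is illustrative only.
 */
#if 0
	err = mpage_process_page_bufs(mpd, head, head, lblk);
	if (err < 0)
		goto out_error;		/* IO submission failed */
	if (err == 0)
		goto map_extent;	/* cannot extend the extent any more */
	/* err == 1: keep scanning the following page */
#endif
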
2227/*
Ritesh Harjani2943fdb2019-10-16 13:07:09 +05302228 * mpage_process_page - update page buffers corresponding to changed extent and
2229 * may submit fully mapped page for IO
2230 *
2231 * @mpd - description of extent to map, on return next extent to map
2232 * @m_lblk - logical block mapping.
2233 * @m_pblk - corresponding physical mapping.
2234 * @map_bh - determines on return whether this page requires any further
2235 * mapping or not.
2236 * Scan the given page's buffers corresponding to the changed extent and update
2237 * the buffer state according to the new extent state.
2238 * We map delalloc buffers to their physical location, clear unwritten bits.
2239 * If the given page is not fully mapped, we update @map to the next extent in
2240 * the given page that needs mapping & return @map_bh as true.
2241 */
2242static int mpage_process_page(struct mpage_da_data *mpd, struct page *page,
2243 ext4_lblk_t *m_lblk, ext4_fsblk_t *m_pblk,
2244 bool *map_bh)
2245{
2246 struct buffer_head *head, *bh;
2247 ext4_io_end_t *io_end = mpd->io_submit.io_end;
2248 ext4_lblk_t lblk = *m_lblk;
2249 ext4_fsblk_t pblock = *m_pblk;
2250 int err = 0;
Ritesh Harjanic8cc8812019-10-16 13:07:10 +05302251 int blkbits = mpd->inode->i_blkbits;
2252 ssize_t io_end_size = 0;
2253 struct ext4_io_end_vec *io_end_vec = ext4_last_io_end_vec(io_end);
Ritesh Harjani2943fdb2019-10-16 13:07:09 +05302254
2255 bh = head = page_buffers(page);
2256 do {
2257 if (lblk < mpd->map.m_lblk)
2258 continue;
2259 if (lblk >= mpd->map.m_lblk + mpd->map.m_len) {
2260 /*
2261 * Buffer after end of mapped extent.
2262 * Find next buffer in the page to map.
2263 */
2264 mpd->map.m_len = 0;
2265 mpd->map.m_flags = 0;
Ritesh Harjanic8cc8812019-10-16 13:07:10 +05302266 io_end_vec->size += io_end_size;
2267 io_end_size = 0;
Ritesh Harjani2943fdb2019-10-16 13:07:09 +05302268
Ritesh Harjani2943fdb2019-10-16 13:07:09 +05302269 err = mpage_process_page_bufs(mpd, head, bh, lblk);
2270 if (err > 0)
2271 err = 0;
Ritesh Harjanic8cc8812019-10-16 13:07:10 +05302272 if (!err && mpd->map.m_len && mpd->map.m_lblk > lblk) {
2273 io_end_vec = ext4_alloc_io_end_vec(io_end);
Ritesh Harjani4d06bfb2019-11-06 15:08:09 +05302274 if (IS_ERR(io_end_vec)) {
2275 err = PTR_ERR(io_end_vec);
2276 goto out;
2277 }
Ritesh Harjanid1e18b82020-10-08 20:32:48 +05302278 io_end_vec->offset = (loff_t)mpd->map.m_lblk << blkbits;
Ritesh Harjanic8cc8812019-10-16 13:07:10 +05302279 }
Ritesh Harjani2943fdb2019-10-16 13:07:09 +05302280 *map_bh = true;
2281 goto out;
2282 }
2283 if (buffer_delay(bh)) {
2284 clear_buffer_delay(bh);
2285 bh->b_blocknr = pblock++;
2286 }
2287 clear_buffer_unwritten(bh);
Ritesh Harjanic8cc8812019-10-16 13:07:10 +05302288 io_end_size += (1 << blkbits);
Ritesh Harjani2943fdb2019-10-16 13:07:09 +05302289 } while (lblk++, (bh = bh->b_this_page) != head);
Ritesh Harjanic8cc8812019-10-16 13:07:10 +05302290
2291 io_end_vec->size += io_end_size;
2292 io_end_size = 0;
Ritesh Harjani2943fdb2019-10-16 13:07:09 +05302293 *map_bh = false;
2294out:
2295 *m_lblk = lblk;
2296 *m_pblk = pblock;
2297 return err;
2298}

/*
 * mpage_map_and_submit_buffers - update buffers corresponding to changed
 *				  extent and submit fully mapped pages for IO
 *
 * @mpd - description of extent to map, on return next extent to map
 *
 * Scan buffers corresponding to the changed extent (we expect the
 * corresponding pages to be already locked) and update the buffer state
 * according to the new extent state.
 * We map delalloc buffers to their physical location, clear unwritten bits,
 * and mark buffers as uninit when we perform writes to unwritten extents
 * and do extent conversion after IO is finished. If the last page is not fully
 * mapped, we update @map to the next extent in the last page that needs
 * mapping. Otherwise we submit the page for IO.
 */
static int mpage_map_and_submit_buffers(struct mpage_da_data *mpd)
{
	struct pagevec pvec;
	int nr_pages, i;
	struct inode *inode = mpd->inode;
	int bpp_bits = PAGE_SHIFT - inode->i_blkbits;
	pgoff_t start, end;
	ext4_lblk_t lblk;
	ext4_fsblk_t pblock;
	int err;
	bool map_bh = false;

	start = mpd->map.m_lblk >> bpp_bits;
	end = (mpd->map.m_lblk + mpd->map.m_len - 1) >> bpp_bits;
	lblk = start << bpp_bits;
	pblock = mpd->map.m_pblk;

	pagevec_init(&pvec);
	while (start <= end) {
		nr_pages = pagevec_lookup_range(&pvec, inode->i_mapping,
						&start, end);
		if (nr_pages == 0)
			break;
		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			err = mpage_process_page(mpd, page, &lblk, &pblock,
						 &map_bh);
			/*
			 * If map_bh is true, the page may require further bh
			 * mapping, or the page was submitted for IO. So we
			 * return to the caller to do further extent mapping.
			 */
			if (err < 0 || map_bh)
				goto out;
			/* Page fully mapped - let IO run! */
			err = mpage_submit_page(mpd, page);
			if (err < 0)
				goto out;
		}
		pagevec_release(&pvec);
	}
	/* Extent fully mapped and matches the page boundary. We are done. */
	mpd->map.m_len = 0;
	mpd->map.m_flags = 0;
	return 0;
out:
	pagevec_release(&pvec);
	return err;
}

static int mpage_map_one_extent(handle_t *handle, struct mpage_da_data *mpd)
{
	struct inode *inode = mpd->inode;
	struct ext4_map_blocks *map = &mpd->map;
	int get_blocks_flags;
	int err, dioread_nolock;

	trace_ext4_da_write_pages_extent(inode, map);
	/*
	 * Call ext4_map_blocks() to allocate any delayed allocation blocks, or
	 * to convert an unwritten extent to be initialized (in the case
	 * where we have written into one or more preallocated blocks). It is
	 * possible that we're going to need more metadata blocks than
	 * previously reserved. However, we must not fail because we're in
	 * writeback and there is nothing we can do about it, so it might
	 * result in data loss. So use reserved blocks to allocate metadata if
	 * possible.
	 *
	 * We pass in the magic EXT4_GET_BLOCKS_DELALLOC_RESERVE if
	 * the blocks in question are delalloc blocks. This indicates
	 * that the blocks and quotas have already been checked when
	 * the data was copied into the page cache.
	 */
	get_blocks_flags = EXT4_GET_BLOCKS_CREATE |
			   EXT4_GET_BLOCKS_METADATA_NOFAIL |
			   EXT4_GET_BLOCKS_IO_SUBMIT;
	dioread_nolock = ext4_should_dioread_nolock(inode);
	if (dioread_nolock)
		get_blocks_flags |= EXT4_GET_BLOCKS_IO_CREATE_EXT;
	if (map->m_flags & BIT(BH_Delay))
		get_blocks_flags |= EXT4_GET_BLOCKS_DELALLOC_RESERVE;

	err = ext4_map_blocks(handle, inode, map, get_blocks_flags);
	if (err < 0)
		return err;
	if (dioread_nolock && (map->m_flags & EXT4_MAP_UNWRITTEN)) {
		if (!mpd->io_submit.io_end->handle &&
		    ext4_handle_valid(handle)) {
			mpd->io_submit.io_end->handle = handle->h_rsv_handle;
			handle->h_rsv_handle = NULL;
		}
		ext4_set_io_unwritten_flag(inode, mpd->io_submit.io_end);
	}

	BUG_ON(map->m_len == 0);
	return 0;
}
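
/*
 * Example flag combination (a sketch of the logic above): for a delalloc
 * buffer on a dioread_nolock mount, get_blocks_flags ends up as
 * EXT4_GET_BLOCKS_CREATE | EXT4_GET_BLOCKS_METADATA_NOFAIL |
 * EXT4_GET_BLOCKS_IO_SUBMIT | EXT4_GET_BLOCKS_IO_CREATE_EXT |
 * EXT4_GET_BLOCKS_DELALLOC_RESERVE, i.e. allocate now, dip into reserved
 * metadata blocks if needed, and create the extent as unwritten so that
 * IO completion converts it after the data hits the disk.
 */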

/*
 * mpage_map_and_submit_extent - map extent starting at mpd->lblk of length
 *				 mpd->len and submit pages underlying it for IO
 *
 * @handle - handle for journal operations
 * @mpd - extent to map
 * @give_up_on_write - we set this to true iff there is a fatal error and there
 *			is no hope of writing the data. The caller should
 *			discard dirty pages to avoid infinite loops.
 *
 * The function maps the extent starting at mpd->lblk of length mpd->len. If it
 * is delayed, blocks are allocated; if it is unwritten, we may need to convert
 * them to initialized or split the described range from a larger unwritten
 * extent. Note that we need not map all of the described range since
 * allocation can return fewer blocks or the range may be covered by more
 * unwritten extents. We cannot map more because we are limited by reserved
 * transaction credits. On the other hand we always make sure that the last
 * touched page is fully mapped so that it can be written out (and thus forward
 * progress is guaranteed). After mapping we submit all mapped pages for IO.
 */
static int mpage_map_and_submit_extent(handle_t *handle,
				       struct mpage_da_data *mpd,
				       bool *give_up_on_write)
{
	struct inode *inode = mpd->inode;
	struct ext4_map_blocks *map = &mpd->map;
	int err;
	loff_t disksize;
	int progress = 0;
	ext4_io_end_t *io_end = mpd->io_submit.io_end;
	struct ext4_io_end_vec *io_end_vec;

	io_end_vec = ext4_alloc_io_end_vec(io_end);
	if (IS_ERR(io_end_vec))
		return PTR_ERR(io_end_vec);
	io_end_vec->offset = ((loff_t)map->m_lblk) << inode->i_blkbits;
	do {
		err = mpage_map_one_extent(handle, mpd);
		if (err < 0) {
			struct super_block *sb = inode->i_sb;

			if (ext4_forced_shutdown(EXT4_SB(sb)) ||
			    ext4_test_mount_flag(sb, EXT4_MF_FS_ABORTED))
				goto invalidate_dirty_pages;
			/*
			 * Let the upper layers retry transient errors.
			 * In the case of ENOSPC, if ext4_count_free_blocks()
			 * is non-zero, a commit should free up blocks.
			 */
			if ((err == -ENOMEM) ||
			    (err == -ENOSPC && ext4_count_free_clusters(sb))) {
				if (progress)
					goto update_disksize;
				return err;
			}
			ext4_msg(sb, KERN_CRIT,
				 "Delayed block allocation failed for "
				 "inode %lu at logical offset %llu with"
				 " max blocks %u with error %d",
				 inode->i_ino,
				 (unsigned long long)map->m_lblk,
				 (unsigned)map->m_len, -err);
			ext4_msg(sb, KERN_CRIT,
				 "This should not happen!! Data will "
				 "be lost\n");
			if (err == -ENOSPC)
				ext4_print_free_blocks(inode);
		invalidate_dirty_pages:
			*give_up_on_write = true;
			return err;
		}
		progress = 1;
		/*
		 * Update buffer state, submit mapped pages, and get the new
		 * extent to map
		 */
		err = mpage_map_and_submit_buffers(mpd);
		if (err < 0)
			goto update_disksize;
	} while (map->m_len);

update_disksize:
	/*
	 * Update on-disk size after IO is submitted. Races with
	 * truncate are avoided by checking i_size under i_data_sem.
	 */
	disksize = ((loff_t)mpd->first_page) << PAGE_SHIFT;
	if (disksize > READ_ONCE(EXT4_I(inode)->i_disksize)) {
		int err2;
		loff_t i_size;

		down_write(&EXT4_I(inode)->i_data_sem);
		i_size = i_size_read(inode);
		if (disksize > i_size)
			disksize = i_size;
		if (disksize > EXT4_I(inode)->i_disksize)
			EXT4_I(inode)->i_disksize = disksize;
		up_write(&EXT4_I(inode)->i_data_sem);
		err2 = ext4_mark_inode_dirty(handle, inode);
		if (err2) {
			ext4_error_err(inode->i_sb, -err2,
				       "Failed to mark inode %lu dirty",
				       inode->i_ino);
		}
		if (!err)
			err = err2;
	}
	return err;
}

/*
 * Calculate the total number of credits to reserve for one writepages
 * iteration. This is called from ext4_writepages(). We map an extent of
 * up to MAX_WRITEPAGES_EXTENT_LEN blocks and then we go on and finish mapping
 * the last partial page. So in total we can map MAX_WRITEPAGES_EXTENT_LEN +
 * bpp - 1 blocks in bpp different extents.
 */
static int ext4_da_writepages_trans_blocks(struct inode *inode)
{
	int bpp = ext4_journal_blocks_per_page(inode);

	return ext4_meta_trans_blocks(inode,
				MAX_WRITEPAGES_EXTENT_LEN + bpp - 1, bpp);
}
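
/*
 * Worked example (assuming MAX_WRITEPAGES_EXTENT_LEN is 2048): with 1K
 * blocks on 4K pages, bpp == 4, so the reservation covers mapping up to
 * 2048 + 4 - 1 == 2051 blocks spread over at most 4 separate extents.
 * With blocksize == pagesize, bpp == 1 and the extra term vanishes.
 */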

/*
 * mpage_prepare_extent_to_map - find & lock contiguous range of dirty pages
 *				 and underlying extent to map
 *
 * @mpd - where to look for pages
 *
 * Walk dirty pages in the mapping. If they are fully mapped, submit them for
 * IO immediately. When we find a page which isn't mapped we start accumulating
 * an extent of buffers underlying these pages that needs mapping (formed by
 * either delayed or unwritten buffers). We also lock the pages containing
 * these buffers. The extent found is returned in the @mpd structure (starting
 * at mpd->lblk with length mpd->len blocks).
 *
 * Note that this function can attach bios to one io_end structure which are
 * neither logically nor physically contiguous. Although it may seem an
 * unnecessary complication, it is actually inevitable in the blocksize <
 * pagesize case as we need to track IO to all buffers underlying a page in
 * one io_end.
 */
static int mpage_prepare_extent_to_map(struct mpage_da_data *mpd)
{
	struct address_space *mapping = mpd->inode->i_mapping;
	struct pagevec pvec;
	unsigned int nr_pages;
	long left = mpd->wbc->nr_to_write;
	pgoff_t index = mpd->first_page;
	pgoff_t end = mpd->last_page;
	xa_mark_t tag;
	int i, err = 0;
	int blkbits = mpd->inode->i_blkbits;
	ext4_lblk_t lblk;
	struct buffer_head *head;

	if (mpd->wbc->sync_mode == WB_SYNC_ALL || mpd->wbc->tagged_writepages)
		tag = PAGECACHE_TAG_TOWRITE;
	else
		tag = PAGECACHE_TAG_DIRTY;

	pagevec_init(&pvec);
	mpd->map.m_len = 0;
	mpd->next_page = index;
	while (index <= end) {
		nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
				tag);
		if (nr_pages == 0)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			/*
			 * Accumulated enough dirty pages? This doesn't apply
			 * to WB_SYNC_ALL mode. For integrity sync we have to
			 * keep going because someone may be concurrently
			 * dirtying pages, and we might have synced a lot of
			 * newly appeared dirty pages, but have not synced all
			 * of the old dirty pages.
			 */
			if (mpd->wbc->sync_mode == WB_SYNC_NONE && left <= 0)
				goto out;

			/* If we can't merge this page, we are done. */
			if (mpd->map.m_len > 0 && mpd->next_page != page->index)
				goto out;

			lock_page(page);
			/*
			 * If the page is no longer dirty, or its mapping no
			 * longer corresponds to the inode we are writing
			 * (which means it has been truncated or invalidated),
			 * or the page is already under writeback and we are
			 * not doing a data integrity writeback, skip the page.
			 */
			if (!PageDirty(page) ||
			    (PageWriteback(page) &&
			     (mpd->wbc->sync_mode == WB_SYNC_NONE)) ||
			    unlikely(page->mapping != mapping)) {
				unlock_page(page);
				continue;
			}

			wait_on_page_writeback(page);
			BUG_ON(PageWriteback(page));

			if (mpd->map.m_len == 0)
				mpd->first_page = page->index;
			mpd->next_page = page->index + 1;
			/* Add all dirty buffers to mpd */
			lblk = ((ext4_lblk_t)page->index) <<
				(PAGE_SHIFT - blkbits);
			head = page_buffers(page);
			err = mpage_process_page_bufs(mpd, head, head, lblk);
			if (err <= 0)
				goto out;
			err = 0;
			left--;
		}
		pagevec_release(&pvec);
		cond_resched();
	}
	mpd->scanned_until_end = 1;
	return 0;
out:
	pagevec_release(&pvec);
	return err;
}

static int ext4_writepages(struct address_space *mapping,
			   struct writeback_control *wbc)
{
	pgoff_t writeback_index = 0;
	long nr_to_write = wbc->nr_to_write;
	int range_whole = 0;
	int cycled = 1;
	handle_t *handle = NULL;
	struct mpage_da_data mpd;
	struct inode *inode = mapping->host;
	int needed_blocks, rsv_blocks = 0, ret = 0;
	struct ext4_sb_info *sbi = EXT4_SB(mapping->host->i_sb);
	struct blk_plug plug;
	bool give_up_on_write = false;

	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
		return -EIO;

	percpu_down_read(&sbi->s_writepages_rwsem);
	trace_ext4_writepages(inode, wbc);

	/*
	 * No pages to write? This is mainly a kludge to avoid starting
	 * a transaction for special inodes like the journal inode on last
	 * iput(), because that could violate lock ordering on umount.
	 */
	if (!mapping->nrpages || !mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
		goto out_writepages;

	if (ext4_should_journal_data(inode)) {
		ret = generic_writepages(mapping, wbc);
		goto out_writepages;
	}

	/*
	 * If the filesystem has aborted, it is read-only, so return
	 * right away instead of dumping stack traces later on that
	 * will obscure the real source of the problem. We test
	 * EXT4_MF_FS_ABORTED instead of sb->s_flag's SB_RDONLY because
	 * the latter could be true if the filesystem is mounted
	 * read-only, and in that case, ext4_writepages should
	 * *never* be called, so if that ever happens, we would want
	 * the stack trace.
	 */
	if (unlikely(ext4_forced_shutdown(EXT4_SB(mapping->host->i_sb)) ||
		     ext4_test_mount_flag(inode->i_sb, EXT4_MF_FS_ABORTED))) {
		ret = -EROFS;
		goto out_writepages;
	}

	/*
	 * If we have inline data and arrive here, it means that
	 * we will soon create the block for the 1st page, so
	 * we'd better clear the inline data here.
	 */
	if (ext4_has_inline_data(inode)) {
		/* Just inode will be modified... */
		handle = ext4_journal_start(inode, EXT4_HT_INODE, 1);
		if (IS_ERR(handle)) {
			ret = PTR_ERR(handle);
			goto out_writepages;
		}
		BUG_ON(ext4_test_inode_state(inode,
				EXT4_STATE_MAY_INLINE_DATA));
		ext4_destroy_inline_data(handle, inode);
		ext4_journal_stop(handle);
	}

	if (ext4_should_dioread_nolock(inode)) {
		/*
		 * We may need to convert up to one extent per block in
		 * the page and we may dirty the inode.
		 */
		rsv_blocks = 1 + ext4_chunk_trans_blocks(inode,
						PAGE_SIZE >> inode->i_blkbits);
	}

	if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
		range_whole = 1;

	if (wbc->range_cyclic) {
		writeback_index = mapping->writeback_index;
		if (writeback_index)
			cycled = 0;
		mpd.first_page = writeback_index;
		mpd.last_page = -1;
	} else {
		mpd.first_page = wbc->range_start >> PAGE_SHIFT;
		mpd.last_page = wbc->range_end >> PAGE_SHIFT;
	}

	mpd.inode = inode;
	mpd.wbc = wbc;
	ext4_io_submit_init(&mpd.io_submit, wbc);
retry:
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag_pages_for_writeback(mapping, mpd.first_page, mpd.last_page);
	blk_start_plug(&plug);

	/*
	 * First write back pages that don't need mapping - we can avoid
	 * starting a transaction unnecessarily and also avoid being blocked
	 * in the block layer on device congestion while having a transaction
	 * started.
	 */
	mpd.do_map = 0;
	mpd.scanned_until_end = 0;
	mpd.io_submit.io_end = ext4_init_io_end(inode, GFP_KERNEL);
	if (!mpd.io_submit.io_end) {
		ret = -ENOMEM;
		goto unplug;
	}
	ret = mpage_prepare_extent_to_map(&mpd);
	/* Unlock pages we didn't use */
	mpage_release_unused_pages(&mpd, false);
	/* Submit prepared bio */
	ext4_io_submit(&mpd.io_submit);
	ext4_put_io_end_defer(mpd.io_submit.io_end);
	mpd.io_submit.io_end = NULL;
	if (ret < 0)
		goto unplug;

	while (!mpd.scanned_until_end && wbc->nr_to_write > 0) {
		/* For each extent of pages we use a new io_end */
		mpd.io_submit.io_end = ext4_init_io_end(inode, GFP_KERNEL);
		if (!mpd.io_submit.io_end) {
			ret = -ENOMEM;
			break;
		}

		/*
		 * We have two constraints: We find one extent to map and we
		 * must always write out the whole page (makes a difference
		 * when blocksize < pagesize) so that we don't block on IO
		 * when we try to write out the rest of the page. Journalled
		 * mode is not supported by delalloc.
		 */
		BUG_ON(ext4_should_journal_data(inode));
		needed_blocks = ext4_da_writepages_trans_blocks(inode);

		/* start a new transaction */
		handle = ext4_journal_start_with_reserve(inode,
				EXT4_HT_WRITE_PAGE, needed_blocks, rsv_blocks);
		if (IS_ERR(handle)) {
			ret = PTR_ERR(handle);
			ext4_msg(inode->i_sb, KERN_CRIT, "%s: jbd2_start: "
			       "%ld pages, ino %lu; err %d", __func__,
				wbc->nr_to_write, inode->i_ino, ret);
			/* Release allocated io_end */
			ext4_put_io_end(mpd.io_submit.io_end);
			mpd.io_submit.io_end = NULL;
			break;
		}
		mpd.do_map = 1;

		trace_ext4_da_write_pages(inode, mpd.first_page, mpd.wbc);
		ret = mpage_prepare_extent_to_map(&mpd);
		if (!ret && mpd.map.m_len)
			ret = mpage_map_and_submit_extent(handle, &mpd,
					&give_up_on_write);
		/*
		 * Caution: If the handle is synchronous,
		 * ext4_journal_stop() can wait for transaction commit
		 * to finish which may depend on writeback of pages to
		 * complete or on page lock to be released. In that
		 * case, we have to wait until after we have
		 * submitted all the IO, released page locks we hold,
		 * and dropped the io_end reference (for extent conversion
		 * to be able to complete) before stopping the handle.
		 */
		if (!ext4_handle_valid(handle) || handle->h_sync == 0) {
			ext4_journal_stop(handle);
			handle = NULL;
			mpd.do_map = 0;
		}
		/* Unlock pages we didn't use */
		mpage_release_unused_pages(&mpd, give_up_on_write);
		/* Submit prepared bio */
		ext4_io_submit(&mpd.io_submit);

		/*
		 * Drop the io_end reference we got from init. We have
		 * to be careful and use deferred io_end finishing if
		 * we are still holding the transaction as we can
		 * release the last reference to io_end which may end
		 * up doing unwritten extent conversion.
		 */
		if (handle) {
			ext4_put_io_end_defer(mpd.io_submit.io_end);
			ext4_journal_stop(handle);
		} else
			ext4_put_io_end(mpd.io_submit.io_end);
		mpd.io_submit.io_end = NULL;

		if (ret == -ENOSPC && sbi->s_journal) {
			/*
			 * Commit the transaction, which would free
			 * blocks released in the transaction, and
			 * try again.
			 */
			jbd2_journal_force_commit_nested(sbi->s_journal);
			ret = 0;
			continue;
		}
		/* Fatal error - ENOMEM, EIO... */
		if (ret)
			break;
	}
unplug:
	blk_finish_plug(&plug);
	if (!ret && !cycled && wbc->nr_to_write > 0) {
		cycled = 1;
		mpd.last_page = writeback_index - 1;
		mpd.first_page = 0;
		goto retry;
	}

	/* Update index */
	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		/*
		 * Set the writeback_index so that range_cyclic
		 * mode will write it back later
		 */
		mapping->writeback_index = mpd.first_page;

out_writepages:
	trace_ext4_writepages_result(inode, wbc, ret,
				     nr_to_write - wbc->nr_to_write);
	percpu_up_read(&sbi->s_writepages_rwsem);
	return ret;
}

static int ext4_dax_writepages(struct address_space *mapping,
			       struct writeback_control *wbc)
{
	int ret;
	long nr_to_write = wbc->nr_to_write;
	struct inode *inode = mapping->host;
	struct ext4_sb_info *sbi = EXT4_SB(mapping->host->i_sb);

	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
		return -EIO;

	percpu_down_read(&sbi->s_writepages_rwsem);
	trace_ext4_writepages(inode, wbc);

	ret = dax_writeback_mapping_range(mapping, sbi->s_daxdev, wbc);
	trace_ext4_writepages_result(inode, wbc, ret,
				     nr_to_write - wbc->nr_to_write);
	percpu_up_read(&sbi->s_writepages_rwsem);
	return ret;
}

static int ext4_nonda_switch(struct super_block *sb)
{
	s64 free_clusters, dirty_clusters;
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	/*
	 * Switch to non-delalloc mode if we are running low on free blocks.
	 * The free block accounting via percpu counters can get slightly
	 * wrong with percpu_counter_batch getting accumulated on each CPU
	 * without updating global counters. Delalloc needs accurate free
	 * block accounting, so switch to non-delalloc when we are near the
	 * error range.
	 */
	free_clusters =
		percpu_counter_read_positive(&sbi->s_freeclusters_counter);
	dirty_clusters =
		percpu_counter_read_positive(&sbi->s_dirtyclusters_counter);
	/*
	 * Start pushing delalloc when 1/2 of free blocks are dirty.
	 */
	if (dirty_clusters && (free_clusters < 2 * dirty_clusters))
		try_to_writeback_inodes_sb(sb, WB_REASON_FS_FREE_SPACE);

	if (2 * free_clusters < 3 * dirty_clusters ||
	    free_clusters < (dirty_clusters + EXT4_FREECLUSTERS_WATERMARK)) {
		/*
		 * The free block count is less than 150% of dirty blocks,
		 * or free blocks are below the watermark.
		 */
		return 1;
	}
	return 0;
}
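
/*
 * Illustrative numbers for the thresholds above: with free_clusters ==
 * 1000, background writeback kicks in once dirty_clusters exceeds 500
 * (half of free), and we switch to non-delalloc once 2 * 1000 <
 * 3 * dirty_clusters, i.e. dirty_clusters above ~666 (free below 150%
 * of dirty), or once free drops under dirty_clusters +
 * EXT4_FREECLUSTERS_WATERMARK.
 */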

/* We always reserve for an inode update; the superblock could be there too */
static int ext4_da_write_credits(struct inode *inode, loff_t pos, unsigned len)
{
	if (likely(ext4_has_feature_large_file(inode->i_sb)))
		return 1;

	if (pos + len <= 0x7fffffffULL)
		return 1;

	/* We might need to update the superblock to set LARGE_FILE */
	return 2;
}
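
/*
 * For example, on a filesystem without the large_file feature, a write
 * ending at pos + len == 0x80000000 (just past 2^31 - 1) returns 2 so
 * the same transaction can also carry the superblock update that sets
 * LARGE_FILE.
 */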

static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
			       loff_t pos, unsigned len, unsigned flags,
			       struct page **pagep, void **fsdata)
{
	int ret, retries = 0;
	struct page *page;
	pgoff_t index;
	struct inode *inode = mapping->host;
	handle_t *handle;

	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
		return -EIO;

	index = pos >> PAGE_SHIFT;

	if (ext4_nonda_switch(inode->i_sb) || S_ISLNK(inode->i_mode) ||
	    ext4_verity_in_progress(inode)) {
		*fsdata = (void *)FALL_BACK_TO_NONDELALLOC;
		return ext4_write_begin(file, mapping, pos,
					len, flags, pagep, fsdata);
	}
	*fsdata = (void *)0;
	trace_ext4_da_write_begin(inode, pos, len, flags);

	if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) {
		ret = ext4_da_write_inline_data_begin(mapping, inode,
						      pos, len, flags,
						      pagep, fsdata);
		if (ret < 0)
			return ret;
		if (ret == 1)
			return 0;
	}

	/*
	 * grab_cache_page_write_begin() can take a long time if the
	 * system is thrashing due to memory pressure, or if the page
	 * is being written back. So grab it first before we start
	 * the transaction handle. This also allows us to allocate
	 * the page (if needed) without using GFP_NOFS.
	 */
retry_grab:
	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page)
		return -ENOMEM;
	unlock_page(page);

	/*
	 * With delayed allocation, we don't log the i_disksize update
	 * if there is delayed block allocation. But we still need to
	 * journal the i_disksize update if the write goes to the end
	 * of the file and hits an already mapped buffer.
	 */
retry_journal:
	handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE,
				ext4_da_write_credits(inode, pos, len));
	if (IS_ERR(handle)) {
		put_page(page);
		return PTR_ERR(handle);
	}

	lock_page(page);
	if (page->mapping != mapping) {
		/* The page got truncated from under us */
		unlock_page(page);
		put_page(page);
		ext4_journal_stop(handle);
		goto retry_grab;
	}
	/* In case writeback began while the page was unlocked */
	wait_for_stable_page(page);

#ifdef CONFIG_FS_ENCRYPTION
	ret = ext4_block_write_begin(page, pos, len,
				     ext4_da_get_block_prep);
#else
	ret = __block_write_begin(page, pos, len, ext4_da_get_block_prep);
#endif
	if (ret < 0) {
		unlock_page(page);
		ext4_journal_stop(handle);
		/*
		 * block_write_begin may have instantiated a few blocks
		 * outside i_size. Trim these off again. Don't need
		 * i_size_read because we hold i_mutex.
		 */
		if (pos + len > inode->i_size)
			ext4_truncate_failed_write(inode);

		if (ret == -ENOSPC &&
		    ext4_should_retry_alloc(inode->i_sb, &retries))
			goto retry_journal;

		put_page(page);
		return ret;
	}

	*pagep = page;
	return ret;
}

/*
 * Check if we should update i_disksize when the write is to the end of
 * the file but does not require block allocation.
 */
static int ext4_da_should_update_i_disksize(struct page *page,
					    unsigned long offset)
{
	struct buffer_head *bh;
	struct inode *inode = page->mapping->host;
	unsigned int idx;
	int i;

	bh = page_buffers(page);
	idx = offset >> inode->i_blkbits;

	for (i = 0; i < idx; i++)
		bh = bh->b_this_page;

	if (!buffer_mapped(bh) || (buffer_delay(bh)) || buffer_unwritten(bh))
		return 0;
	return 1;
}

static int ext4_da_write_end(struct file *file,
			     struct address_space *mapping,
			     loff_t pos, unsigned len, unsigned copied,
			     struct page *page, void *fsdata)
{
	struct inode *inode = mapping->host;
	int ret = 0, ret2;
	handle_t *handle = ext4_journal_current_handle();
	loff_t new_i_size;
	unsigned long start, end;
	int write_mode = (int)(unsigned long)fsdata;

	if (write_mode == FALL_BACK_TO_NONDELALLOC)
		return ext4_write_end(file, mapping, pos,
				      len, copied, page, fsdata);

	trace_ext4_da_write_end(inode, pos, len, copied);
	start = pos & (PAGE_SIZE - 1);
	end = start + copied - 1;

	/*
	 * generic_write_end() will run mark_inode_dirty() if i_size
	 * changes. So let's piggyback the i_disksize mark_inode_dirty
	 * into that.
	 */
	new_i_size = pos + copied;
	if (copied && new_i_size > EXT4_I(inode)->i_disksize) {
		if (ext4_has_inline_data(inode) ||
		    ext4_da_should_update_i_disksize(page, end)) {
			ext4_update_i_disksize(inode, new_i_size);
			/*
			 * We need to mark the inode dirty even if
			 * new_i_size is less than inode->i_size but
			 * greater than i_disksize. (hint: delalloc)
			 */
			ret = ext4_mark_inode_dirty(handle, inode);
		}
	}

	if (write_mode != CONVERT_INLINE_DATA &&
	    ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA) &&
	    ext4_has_inline_data(inode))
		ret2 = ext4_da_write_inline_data_end(inode, pos, len, copied,
						     page);
	else
		ret2 = generic_write_end(file, mapping, pos, len, copied,
					 page, fsdata);

	copied = ret2;
	if (ret2 < 0)
		ret = ret2;
	ret2 = ext4_journal_stop(handle);
	if (unlikely(ret2 && !ret))
		ret = ret2;

	return ret ? ret : copied;
}

/*
 * Force all delayed allocation blocks to be allocated for a given inode.
 */
int ext4_alloc_da_blocks(struct inode *inode)
{
	trace_ext4_alloc_da_blocks(inode);

	if (!EXT4_I(inode)->i_reserved_data_blocks)
		return 0;

	/*
	 * We do something simple for now.  The filemap_flush() will
	 * also start triggering a write of the data blocks, which is
	 * not strictly speaking necessary (and for users of
	 * laptop_mode, not even desirable).  However, to do otherwise
	 * would require replicating code paths in:
	 *
	 * ext4_writepages() ->
	 *    write_cache_pages() ---> (via passed in callback function)
	 *        __mpage_da_writepage() -->
	 *           mpage_add_bh_to_extent()
	 *           mpage_da_map_blocks()
	 *
	 * The problem is that write_cache_pages(), located in
	 * mm/page-writeback.c, marks pages clean in preparation for
	 * doing I/O, which is not desirable if we're not planning on
	 * doing I/O at all.
	 *
	 * We could call write_cache_pages(), and then redirty all of
	 * the pages by calling redirty_page_for_writepage() but that
	 * would be ugly in the extreme.  So instead we would need to
	 * replicate parts of the code in the above functions,
	 * simplifying them because we wouldn't actually intend to
	 * write out the pages, but rather only collect contiguous
	 * logical block extents, call the multi-block allocator, and
	 * then update the buffer heads with the block allocations.
	 *
	 * For now, though, we'll cheat by calling filemap_flush(),
	 * which will map the blocks, and start the I/O, but not
	 * actually wait for the I/O to complete.
	 */
	return filemap_flush(inode->i_mapping);
}

/*
 * bmap() is special. It gets used by applications such as lilo and by
 * the swapper to find the on-disk block of a specific piece of data.
 *
 * Naturally, this is dangerous if the block concerned is still in the
 * journal. If somebody makes a swapfile on an ext4 data-journaling
 * filesystem and enables swap, then they may get a nasty shock when the
 * data getting swapped to that swapfile suddenly gets overwritten by
 * the original zeros written out previously to the journal and
 * awaiting writeback in the kernel's buffer cache.
 *
 * So, if we see any bmap calls here on a modified, data-journaled file,
 * take extra steps to flush any blocks which might be in the cache.
 */
static sector_t ext4_bmap(struct address_space *mapping, sector_t block)
{
	struct inode *inode = mapping->host;
	journal_t *journal;
	int err;

	/*
	 * We can get here for an inline file via the FIBMAP ioctl
	 */
	if (ext4_has_inline_data(inode))
		return 0;

	if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) &&
	    test_opt(inode->i_sb, DELALLOC)) {
		/*
		 * With delalloc we want to sync the file
		 * so that we can make sure we allocate
		 * blocks for the file.
		 */
		filemap_write_and_wait(mapping);
	}

	if (EXT4_JOURNAL(inode) &&
	    ext4_test_inode_state(inode, EXT4_STATE_JDATA)) {
		/*
		 * This is a REALLY heavyweight approach, but the use of
		 * bmap on dirty files is expected to be extremely rare:
		 * only if we run lilo or swapon on a freshly made file
		 * do we expect this to happen.
		 *
		 * (bmap requires CAP_SYS_RAWIO so this does not
		 * represent an unprivileged user DOS attack --- we'd be
		 * in trouble if mortal users could trigger this path at
		 * will.)
		 *
		 * NB. EXT4_STATE_JDATA is not set on files other than
		 * regular files. If somebody wants to bmap a directory
		 * or symlink and gets confused because the buffer
		 * hasn't yet been flushed to disk, they deserve
		 * everything they get.
		 */

		ext4_clear_inode_state(inode, EXT4_STATE_JDATA);
		journal = EXT4_JOURNAL(inode);
		jbd2_journal_lock_updates(journal);
		err = jbd2_journal_flush(journal);
		jbd2_journal_unlock_updates(journal);

		if (err)
			return 0;
	}

	return iomap_bmap(mapping, block, &ext4_iomap_ops);
}

static int ext4_readpage(struct file *file, struct page *page)
{
	int ret = -EAGAIN;
	struct inode *inode = page->mapping->host;

	trace_ext4_readpage(page);

	if (ext4_has_inline_data(inode))
		ret = ext4_readpage_inline(inode, page);

	if (ret == -EAGAIN)
		return ext4_mpage_readpages(inode, NULL, page);

	return ret;
}

static void ext4_readahead(struct readahead_control *rac)
{
	struct inode *inode = rac->mapping->host;

	/* If the file has inline data, no need to do readahead. */
	if (ext4_has_inline_data(inode))
		return;

	ext4_mpage_readpages(inode, rac, NULL);
}

static void ext4_invalidatepage(struct page *page, unsigned int offset,
				unsigned int length)
{
	trace_ext4_invalidatepage(page, offset, length);

	/* No journalling happens on data buffers when this function is used */
	WARN_ON(page_has_buffers(page) && buffer_jbd(page_buffers(page)));

	block_invalidatepage(page, offset, length);
}

static int __ext4_journalled_invalidatepage(struct page *page,
					    unsigned int offset,
					    unsigned int length)
{
	journal_t *journal = EXT4_JOURNAL(page->mapping->host);

	trace_ext4_journalled_invalidatepage(page, offset, length);

	/*
	 * If it's a full truncate we just forget about the pending dirtying
	 */
	if (offset == 0 && length == PAGE_SIZE)
		ClearPageChecked(page);

	return jbd2_journal_invalidatepage(journal, page, offset, length);
}

/* Wrapper for aops... */
static void ext4_journalled_invalidatepage(struct page *page,
					   unsigned int offset,
					   unsigned int length)
{
	WARN_ON(__ext4_journalled_invalidatepage(page, offset, length) < 0);
}

static int ext4_releasepage(struct page *page, gfp_t wait)
{
	journal_t *journal = EXT4_JOURNAL(page->mapping->host);

	trace_ext4_releasepage(page);

	/* Page has dirty journalled data -> cannot release */
	if (PageChecked(page))
		return 0;
	if (journal)
		return jbd2_journal_try_to_free_buffers(journal, page);
	else
		return try_to_free_buffers(page);
}

static bool ext4_inode_datasync_dirty(struct inode *inode)
{
	journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;

	if (journal) {
		if (jbd2_transaction_committed(journal,
			EXT4_I(inode)->i_datasync_tid))
			return false;
		if (test_opt2(inode->i_sb, JOURNAL_FAST_COMMIT))
			return !list_empty(&EXT4_I(inode)->i_fc_list);
		return true;
	}

	/* Any metadata buffers to write? */
	if (!list_empty(&inode->i_mapping->private_list))
		return true;
	return inode->i_state & I_DIRTY_DATASYNC;
}

static void ext4_set_iomap(struct inode *inode, struct iomap *iomap,
			   struct ext4_map_blocks *map, loff_t offset,
			   loff_t length)
{
	u8 blkbits = inode->i_blkbits;

	/*
	 * Writes that span EOF might trigger an I/O size update on completion,
	 * so consider them to be dirty for the purpose of O_DSYNC, even if
	 * there are no other metadata changes being made or pending.
	 */
	iomap->flags = 0;
	if (ext4_inode_datasync_dirty(inode) ||
	    offset + length > i_size_read(inode))
		iomap->flags |= IOMAP_F_DIRTY;

	if (map->m_flags & EXT4_MAP_NEW)
		iomap->flags |= IOMAP_F_NEW;

	iomap->bdev = inode->i_sb->s_bdev;
	iomap->dax_dev = EXT4_SB(inode->i_sb)->s_daxdev;
	iomap->offset = (u64) map->m_lblk << blkbits;
	iomap->length = (u64) map->m_len << blkbits;

	if ((map->m_flags & EXT4_MAP_MAPPED) &&
	    !ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
		iomap->flags |= IOMAP_F_MERGED;

	/*
	 * Flags passed to ext4_map_blocks() for direct I/O writes can result
	 * in m_flags having both the EXT4_MAP_MAPPED and EXT4_MAP_UNWRITTEN
	 * bits set. In order for any allocated unwritten extents to be
	 * converted into written extents correctly within the ->end_io()
	 * handler, we need to ensure that the iomap->type is set
	 * appropriately. Hence we must check whether the EXT4_MAP_UNWRITTEN
	 * bit has been set first.
	 */
	if (map->m_flags & EXT4_MAP_UNWRITTEN) {
		iomap->type = IOMAP_UNWRITTEN;
		iomap->addr = (u64) map->m_pblk << blkbits;
	} else if (map->m_flags & EXT4_MAP_MAPPED) {
		iomap->type = IOMAP_MAPPED;
		iomap->addr = (u64) map->m_pblk << blkbits;
	} else {
		iomap->type = IOMAP_HOLE;
		iomap->addr = IOMAP_NULL_ADDR;
	}
}
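
/*
 * Worked example (hypothetical values): with a 4KiB block size
 * (blkbits == 12), a mapping of m_lblk == 100, m_len == 8 and
 * m_pblk == 5000 is translated to iomap->offset == 100 << 12 == 409600,
 * iomap->length == 8 << 12 == 32768 and iomap->addr == 5000 << 12.
 */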

static int ext4_iomap_alloc(struct inode *inode, struct ext4_map_blocks *map,
			    unsigned int flags)
{
	handle_t *handle;
	u8 blkbits = inode->i_blkbits;
	int ret, dio_credits, m_flags = 0, retries = 0;

	/*
	 * Trim the mapping request to the maximum value that we can map at
	 * once for direct I/O.
	 */
	if (map->m_len > DIO_MAX_BLOCKS)
		map->m_len = DIO_MAX_BLOCKS;
	dio_credits = ext4_chunk_trans_blocks(inode, map->m_len);

retry:
	/*
	 * Either we allocate blocks and then don't get an unwritten extent,
	 * in which case we have reserved enough credits, or the blocks are
	 * already allocated and unwritten, in which case the extent
	 * conversion fits into the credits as well.
	 */
	handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS, dio_credits);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	/*
	 * DAX and direct I/O are the only two operations that are currently
	 * supported with IOMAP_WRITE.
	 */
	WARN_ON(!IS_DAX(inode) && !(flags & IOMAP_DIRECT));
	if (IS_DAX(inode))
		m_flags = EXT4_GET_BLOCKS_CREATE_ZERO;
	/*
	 * We use i_size instead of i_disksize here because delalloc writeback
	 * can complete at any point during the I/O and subsequently push the
	 * i_disksize out to i_size. This could be beyond where direct I/O is
	 * happening and thus expose allocated blocks to direct I/O reads.
	 */
	else if ((map->m_lblk * (1 << blkbits)) >= i_size_read(inode))
		m_flags = EXT4_GET_BLOCKS_CREATE;
	else if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
		m_flags = EXT4_GET_BLOCKS_IO_CREATE_EXT;

	ret = ext4_map_blocks(handle, inode, map, m_flags);

	/*
	 * We cannot fill holes in indirect tree based inodes as that could
	 * expose stale data in the case of a crash. Use the magic error code
	 * to fall back to buffered I/O.
	 */
	if (!m_flags && !ret)
		ret = -ENOTBLK;

	ext4_journal_stop(handle);
	if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
		goto retry;

	return ret;
}
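
/*
 * Note (an assumption about the iomap calling convention): trimming m_len
 * to DIO_MAX_BLOCKS above does not truncate the user's I/O; the generic
 * iomap machinery is expected to call ->iomap_begin() again for whatever
 * part of the requested range this invocation did not map.
 */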

static int ext4_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
		unsigned flags, struct iomap *iomap, struct iomap *srcmap)
{
	int ret;
	struct ext4_map_blocks map;
	u8 blkbits = inode->i_blkbits;

	if ((offset >> blkbits) > EXT4_MAX_LOGICAL_BLOCK)
		return -EINVAL;

	if (WARN_ON_ONCE(ext4_has_inline_data(inode)))
		return -ERANGE;

	/*
	 * Calculate the first and last logical blocks respectively.
	 */
	map.m_lblk = offset >> blkbits;
	map.m_len = min_t(loff_t, (offset + length - 1) >> blkbits,
			  EXT4_MAX_LOGICAL_BLOCK) - map.m_lblk + 1;

	if (flags & IOMAP_WRITE) {
		/*
		 * We check here if the blocks are already allocated; if so,
		 * we don't need to start a journal txn and can directly
		 * return the mapping information. This can boost performance,
		 * especially for multi-threaded overwrite requests.
		 */
		if (offset + length <= i_size_read(inode)) {
			ret = ext4_map_blocks(NULL, inode, &map, 0);
			if (ret > 0 && (map.m_flags & EXT4_MAP_MAPPED))
				goto out;
		}
		ret = ext4_iomap_alloc(inode, &map, flags);
	} else {
		ret = ext4_map_blocks(NULL, inode, &map, 0);
	}

	if (ret < 0)
		return ret;
out:
	ext4_set_iomap(inode, iomap, &map, offset, length);

	return 0;
}

static int ext4_iomap_overwrite_begin(struct inode *inode, loff_t offset,
		loff_t length, unsigned flags, struct iomap *iomap,
		struct iomap *srcmap)
{
	int ret;

	/*
	 * Even for writes we don't need to allocate blocks, so just pretend
	 * we are reading to save the overhead of starting a transaction.
	 */
	flags &= ~IOMAP_WRITE;
	ret = ext4_iomap_begin(inode, offset, length, flags, iomap, srcmap);
	WARN_ON_ONCE(iomap->type != IOMAP_MAPPED);
	return ret;
}

static int ext4_iomap_end(struct inode *inode, loff_t offset, loff_t length,
			  ssize_t written, unsigned flags, struct iomap *iomap)
{
	/*
	 * Check to see whether an error occurred while writing out the data
	 * to the allocated blocks. If so, return the magic error code so
	 * that we fall back to buffered I/O and attempt to complete the
	 * remainder of the I/O. Any blocks that may have been allocated in
	 * preparation for the direct I/O will be reused during buffered I/O.
	 */
	if (flags & (IOMAP_WRITE | IOMAP_DIRECT) && written == 0)
		return -ENOTBLK;

	return 0;
}

const struct iomap_ops ext4_iomap_ops = {
	.iomap_begin		= ext4_iomap_begin,
	.iomap_end		= ext4_iomap_end,
};

const struct iomap_ops ext4_iomap_overwrite_ops = {
	.iomap_begin		= ext4_iomap_overwrite_begin,
	.iomap_end		= ext4_iomap_end,
};
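
/*
 * Usage sketch (assumed callers, for illustration only): the direct I/O
 * write path in ext4's file code is expected to hand these tables to the
 * generic iomap layer, roughly along the lines of
 *
 *	ret = iomap_dio_rw(iocb, from,
 *			   overwrite ? &ext4_iomap_overwrite_ops
 *				     : &ext4_iomap_ops,
 *			   &ext4_dio_write_ops, is_sync_kiocb(iocb));
 *
 * where the overwrite variant is selected only once the caller has proven
 * the whole range is already allocated, so no transaction is needed.
 */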

static bool ext4_iomap_is_delalloc(struct inode *inode,
				   struct ext4_map_blocks *map)
{
	struct extent_status es;
	ext4_lblk_t offset = 0, end = map->m_lblk + map->m_len - 1;

	ext4_es_find_extent_range(inode, &ext4_es_is_delayed,
				  map->m_lblk, end, &es);

	if (!es.es_len || es.es_lblk > end)
		return false;

	if (es.es_lblk > map->m_lblk) {
		map->m_len = es.es_lblk - map->m_lblk;
		return false;
	}

	offset = map->m_lblk - es.es_lblk;
	map->m_len = es.es_len - offset;

	return true;
}
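
/*
 * Worked example (hypothetical numbers): for a query of m_lblk == 10,
 * m_len == 20 (blocks 10..29), a delayed extent starting at es_lblk == 15
 * trims m_len to 5 and reports "not delalloc" for blocks 10..14; a
 * delayed extent with es_lblk == 5, es_len == 10 instead yields
 * offset == 5 and m_len == 5, i.e. blocks 10..14 are delayed.
 */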

static int ext4_iomap_begin_report(struct inode *inode, loff_t offset,
				   loff_t length, unsigned int flags,
				   struct iomap *iomap, struct iomap *srcmap)
{
	int ret;
	bool delalloc = false;
	struct ext4_map_blocks map;
	u8 blkbits = inode->i_blkbits;

	if ((offset >> blkbits) > EXT4_MAX_LOGICAL_BLOCK)
		return -EINVAL;

	if (ext4_has_inline_data(inode)) {
		ret = ext4_inline_data_iomap(inode, iomap);
		if (ret != -EAGAIN) {
			if (ret == 0 && offset >= iomap->length)
				ret = -ENOENT;
			return ret;
		}
	}

	/*
	 * Calculate the first and last logical block respectively.
	 */
	map.m_lblk = offset >> blkbits;
	map.m_len = min_t(loff_t, (offset + length - 1) >> blkbits,
			  EXT4_MAX_LOGICAL_BLOCK) - map.m_lblk + 1;

	/*
	 * Fiemap callers may call for an offset beyond s_bitmap_maxbytes, so
	 * handle it here instead of querying ext4_map_blocks(), which would
	 * warn about it and return an -EIO error.
	 */
	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
		struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);

		if (offset >= sbi->s_bitmap_maxbytes) {
			map.m_flags = 0;
			goto set_iomap;
		}
	}

	ret = ext4_map_blocks(NULL, inode, &map, 0);
	if (ret < 0)
		return ret;
	if (ret == 0)
		delalloc = ext4_iomap_is_delalloc(inode, &map);

set_iomap:
	ext4_set_iomap(inode, iomap, &map, offset, length);
	if (delalloc && iomap->type == IOMAP_HOLE)
		iomap->type = IOMAP_DELALLOC;

	return 0;
}

const struct iomap_ops ext4_iomap_report_ops = {
	.iomap_begin = ext4_iomap_begin_report,
};
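
/*
 * Usage sketch (assumed callers, illustrative): these report-only ops are
 * what read-only consumers such as lseek(fd, pos, SEEK_HOLE/SEEK_DATA) or
 * FIEMAP would go through, e.g.
 *
 *	offset = iomap_seek_hole(inode, offset, &ext4_iomap_report_ops);
 *
 * so no transaction is started, and holes backed by delayed allocation
 * are reported as IOMAP_DELALLOC rather than IOMAP_HOLE.
 */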

/*
 * Pages can be marked dirty completely asynchronously from ext4's
 * journalling activity, e.g. by filemap_sync_pte(), try_to_unmap_one(),
 * etc. We cannot do much here because ->set_page_dirty is called under
 * VFS locks. The page is not necessarily locked.
 *
 * We cannot just dirty the page and leave attached buffers clean, because
 * the buffers' dirty state is "definitive". We cannot just set the buffers
 * dirty or jbddirty because all the journalling code will explode.
 *
 * So what we do is mark the page "pending dirty" and, the next time
 * writepage is called, propagate that into the buffers appropriately.
 */
static int ext4_journalled_set_page_dirty(struct page *page)
{
	SetPageChecked(page);
	return __set_page_dirty_nobuffers(page);
}

static int ext4_set_page_dirty(struct page *page)
{
	WARN_ON_ONCE(!PageLocked(page) && !PageDirty(page));
	WARN_ON_ONCE(!page_has_buffers(page));
	return __set_page_dirty_buffers(page);
}

static int ext4_iomap_swap_activate(struct swap_info_struct *sis,
				    struct file *file, sector_t *span)
{
	return iomap_swapfile_activate(sis, file, span,
				       &ext4_iomap_report_ops);
}

static const struct address_space_operations ext4_aops = {
	.readpage		= ext4_readpage,
	.readahead		= ext4_readahead,
	.writepage		= ext4_writepage,
	.writepages		= ext4_writepages,
	.write_begin		= ext4_write_begin,
	.write_end		= ext4_write_end,
	.set_page_dirty		= ext4_set_page_dirty,
	.bmap			= ext4_bmap,
	.invalidatepage		= ext4_invalidatepage,
	.releasepage		= ext4_releasepage,
	.direct_IO		= noop_direct_IO,
	.migratepage		= buffer_migrate_page,
	.is_partially_uptodate	= block_is_partially_uptodate,
	.error_remove_page	= generic_error_remove_page,
	.swap_activate		= ext4_iomap_swap_activate,
};

static const struct address_space_operations ext4_journalled_aops = {
	.readpage		= ext4_readpage,
	.readahead		= ext4_readahead,
	.writepage		= ext4_writepage,
	.writepages		= ext4_writepages,
	.write_begin		= ext4_write_begin,
	.write_end		= ext4_journalled_write_end,
	.set_page_dirty		= ext4_journalled_set_page_dirty,
	.bmap			= ext4_bmap,
	.invalidatepage		= ext4_journalled_invalidatepage,
	.releasepage		= ext4_releasepage,
	.direct_IO		= noop_direct_IO,
	.is_partially_uptodate	= block_is_partially_uptodate,
	.error_remove_page	= generic_error_remove_page,
	.swap_activate		= ext4_iomap_swap_activate,
};

static const struct address_space_operations ext4_da_aops = {
	.readpage		= ext4_readpage,
	.readahead		= ext4_readahead,
	.writepage		= ext4_writepage,
	.writepages		= ext4_writepages,
	.write_begin		= ext4_da_write_begin,
	.write_end		= ext4_da_write_end,
	.set_page_dirty		= ext4_set_page_dirty,
	.bmap			= ext4_bmap,
	.invalidatepage		= ext4_invalidatepage,
	.releasepage		= ext4_releasepage,
	.direct_IO		= noop_direct_IO,
	.migratepage		= buffer_migrate_page,
	.is_partially_uptodate	= block_is_partially_uptodate,
	.error_remove_page	= generic_error_remove_page,
	.swap_activate		= ext4_iomap_swap_activate,
};

static const struct address_space_operations ext4_dax_aops = {
	.writepages		= ext4_dax_writepages,
	.direct_IO		= noop_direct_IO,
	.set_page_dirty		= noop_set_page_dirty,
	.bmap			= ext4_bmap,
	.invalidatepage		= noop_invalidatepage,
	.swap_activate		= ext4_iomap_swap_activate,
};

void ext4_set_aops(struct inode *inode)
{
	switch (ext4_inode_journal_mode(inode)) {
	case EXT4_INODE_ORDERED_DATA_MODE:
	case EXT4_INODE_WRITEBACK_DATA_MODE:
		break;
	case EXT4_INODE_JOURNAL_DATA_MODE:
		inode->i_mapping->a_ops = &ext4_journalled_aops;
		return;
	default:
		BUG();
	}
	if (IS_DAX(inode))
		inode->i_mapping->a_ops = &ext4_dax_aops;
	else if (test_opt(inode->i_sb, DELALLOC))
		inode->i_mapping->a_ops = &ext4_da_aops;
	else
		inode->i_mapping->a_ops = &ext4_aops;
}

static int __ext4_block_zero_page_range(handle_t *handle,
		struct address_space *mapping, loff_t from, loff_t length)
{
	ext4_fsblk_t index = from >> PAGE_SHIFT;
	unsigned offset = from & (PAGE_SIZE-1);
	unsigned blocksize, pos;
	ext4_lblk_t iblock;
	struct inode *inode = mapping->host;
	struct buffer_head *bh;
	struct page *page;
	int err = 0;

	page = find_or_create_page(mapping, from >> PAGE_SHIFT,
				   mapping_gfp_constraint(mapping, ~__GFP_FS));
	if (!page)
		return -ENOMEM;

	blocksize = inode->i_sb->s_blocksize;

	iblock = index << (PAGE_SHIFT - inode->i_sb->s_blocksize_bits);

	if (!page_has_buffers(page))
		create_empty_buffers(page, blocksize, 0);

	/* Find the buffer that contains "offset" */
	bh = page_buffers(page);
	pos = blocksize;
	while (offset >= pos) {
		bh = bh->b_this_page;
		iblock++;
		pos += blocksize;
	}
	if (buffer_freed(bh)) {
		BUFFER_TRACE(bh, "freed: skip");
		goto unlock;
	}
	if (!buffer_mapped(bh)) {
		BUFFER_TRACE(bh, "unmapped");
		ext4_get_block(inode, iblock, bh, 0);
		/* unmapped? It's a hole - nothing to do */
		if (!buffer_mapped(bh)) {
			BUFFER_TRACE(bh, "still unmapped");
			goto unlock;
		}
	}

	/* Ok, it's mapped. Make sure it's up-to-date */
	if (PageUptodate(page))
		set_buffer_uptodate(bh);

	if (!buffer_uptodate(bh)) {
		err = ext4_read_bh_lock(bh, 0, true);
		if (err)
			goto unlock;
		if (fscrypt_inode_uses_fs_layer_crypto(inode)) {
			/* We expect the key to be set. */
			BUG_ON(!fscrypt_has_encryption_key(inode));
			err = fscrypt_decrypt_pagecache_blocks(page, blocksize,
							       bh_offset(bh));
			if (err) {
				clear_buffer_uptodate(bh);
				goto unlock;
			}
		}
	}
	if (ext4_should_journal_data(inode)) {
		BUFFER_TRACE(bh, "get write access");
		err = ext4_journal_get_write_access(handle, bh);
		if (err)
			goto unlock;
	}
	zero_user(page, offset, length);
	BUFFER_TRACE(bh, "zeroed end of block");

	if (ext4_should_journal_data(inode)) {
		err = ext4_handle_dirty_metadata(handle, inode, bh);
	} else {
		err = 0;
		mark_buffer_dirty(bh);
		if (ext4_should_order_data(inode))
			err = ext4_jbd2_inode_add_write(handle, inode, from,
							length);
	}

unlock:
	unlock_page(page);
	put_page(page);
	return err;
}

/*
 * ext4_block_zero_page_range() zeros out a mapping of length 'length'
 * starting from file offset 'from'. The range to be zeroed must be
 * contained within one block. If the specified range exceeds the end of
 * the block, it will be shortened to the end of the block that
 * corresponds to 'from'.
 */
static int ext4_block_zero_page_range(handle_t *handle,
		struct address_space *mapping, loff_t from, loff_t length)
{
	struct inode *inode = mapping->host;
	unsigned offset = from & (PAGE_SIZE-1);
	unsigned blocksize = inode->i_sb->s_blocksize;
	unsigned max = blocksize - (offset & (blocksize - 1));

	/*
	 * Correct the length if it does not fall between 'from' and the end
	 * of the block.
	 */
	if (length > max || length < 0)
		length = max;

	if (IS_DAX(inode)) {
		return iomap_zero_range(inode, from, length, NULL,
					&ext4_iomap_ops);
	}
	return __ext4_block_zero_page_range(handle, mapping, from, length);
}
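
/*
 * Worked example (hypothetical numbers): with blocksize == 4096 and
 * from == 5000, the in-block offset is 5000 & 4095 == 904, so
 * max == 4096 - 904 == 3192; a requested length of 10000 would be
 * shortened to 3192 so that zeroing never crosses the block boundary.
 */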

/*
 * ext4_block_truncate_page() zeroes out a mapping from file offset `from'
 * up to the end of the block which corresponds to `from'. This is
 * required during truncate: we need to physically zero the tail end of
 * that block so it doesn't yield old data if the file is later grown.
 */
static int ext4_block_truncate_page(handle_t *handle,
				    struct address_space *mapping, loff_t from)
{
	unsigned offset = from & (PAGE_SIZE-1);
	unsigned length;
	unsigned blocksize;
	struct inode *inode = mapping->host;

	/* If we are processing an encrypted inode during orphan list handling */
	if (IS_ENCRYPTED(inode) && !fscrypt_has_encryption_key(inode))
		return 0;

	blocksize = inode->i_sb->s_blocksize;
	length = blocksize - (offset & (blocksize - 1));

	return ext4_block_zero_page_range(handle, mapping, from, length);
}

int ext4_zero_partial_blocks(handle_t *handle, struct inode *inode,
			     loff_t lstart, loff_t length)
{
	struct super_block *sb = inode->i_sb;
	struct address_space *mapping = inode->i_mapping;
	unsigned partial_start, partial_end;
	ext4_fsblk_t start, end;
	loff_t byte_end = (lstart + length - 1);
	int err = 0;

	partial_start = lstart & (sb->s_blocksize - 1);
	partial_end = byte_end & (sb->s_blocksize - 1);

	start = lstart >> sb->s_blocksize_bits;
	end = byte_end >> sb->s_blocksize_bits;

	/* Handle a partial zero within a single block */
	if (start == end &&
	    (partial_start || (partial_end != sb->s_blocksize - 1))) {
		err = ext4_block_zero_page_range(handle, mapping,
						 lstart, length);
		return err;
	}
	/* Handle a partial zero out at the start of the range */
	if (partial_start) {
		err = ext4_block_zero_page_range(handle, mapping,
						 lstart, sb->s_blocksize);
		if (err)
			return err;
	}
	/* Handle a partial zero out at the end of the range */
	if (partial_end != sb->s_blocksize - 1)
		err = ext4_block_zero_page_range(handle, mapping,
						 byte_end - partial_end,
						 partial_end + 1);
	return err;
}
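
/*
 * Worked example (hypothetical numbers): with a 4096-byte block size,
 * lstart == 5000 and length == 10000 give byte_end == 14999, so
 * partial_start == 904 and partial_end == 2711; the head is zeroed from
 * offset 5000 and the tail from 14999 - 2711 == 12288 for 2712 bytes,
 * while the fully-covered block in between is left for the
 * extent-removal code.
 */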

int ext4_can_truncate(struct inode *inode)
{
	if (S_ISREG(inode->i_mode))
		return 1;
	if (S_ISDIR(inode->i_mode))
		return 1;
	if (S_ISLNK(inode->i_mode))
		return !ext4_inode_is_fast_symlink(inode);
	return 0;
}

/*
 * We have to make sure i_disksize gets properly updated before we truncate
 * page cache due to hole punching or zero range. Otherwise the i_disksize
 * update can get lost: it may have been postponed until writeback
 * submission, which will never happen once we have truncated the page
 * cache.
 */
int ext4_update_disksize_before_punch(struct inode *inode, loff_t offset,
				      loff_t len)
{
	handle_t *handle;
	int ret;

	loff_t size = i_size_read(inode);

	WARN_ON(!inode_is_locked(inode));
	if (offset > size || offset + len < size)
		return 0;

	if (EXT4_I(inode)->i_disksize >= size)
		return 0;

	handle = ext4_journal_start(inode, EXT4_HT_MISC, 1);
	if (IS_ERR(handle))
		return PTR_ERR(handle);
	ext4_update_i_disksize(inode, size);
	ret = ext4_mark_inode_dirty(handle, inode);
	ext4_journal_stop(handle);

	return ret;
}

static void ext4_wait_dax_page(struct ext4_inode_info *ei)
{
	up_write(&ei->i_mmap_sem);
	schedule();
	down_write(&ei->i_mmap_sem);
}

int ext4_break_layouts(struct inode *inode)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct page *page;
	int error;

	if (WARN_ON_ONCE(!rwsem_is_locked(&ei->i_mmap_sem)))
		return -EINVAL;

	do {
		page = dax_layout_busy_page(inode->i_mapping);
		if (!page)
			return 0;

		error = ___wait_var_event(&page->_refcount,
				atomic_read(&page->_refcount) == 1,
				TASK_INTERRUPTIBLE, 0, 0,
				ext4_wait_dax_page(ei));
	} while (error == 0);

	return error;
}
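
/*
 * Note on the loop above (a sketch of the intent): dax_layout_busy_page()
 * returns a DAX page whose elevated refcount shows it is still pinned
 * (e.g. by get_user_pages()); we drop i_mmap_sem, sleep until the
 * refcount falls back to one, retake the lock and re-scan, since other
 * pages may have become busy in the meantime.
 */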

/*
 * ext4_punch_hole: punches a hole in a file by releasing the blocks
 * associated with the given offset and length
 *
 * @inode:  File inode
 * @offset: The offset where the hole will begin
 * @length: The length of the hole
 *
 * Returns: 0 on success or negative on failure
 */
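/*
 * Usage sketch (illustrative, from userspace): this is the backend for
 * hole punching requested via
 *
 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
 *		  offset, length);
 *
 * which releases the blocks in the range while preserving the file size.
 */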

int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
{
	struct super_block *sb = inode->i_sb;
	ext4_lblk_t first_block, stop_block;
	struct address_space *mapping = inode->i_mapping;
	loff_t first_block_offset, last_block_offset;
	handle_t *handle;
	unsigned int credits;
	int ret = 0, ret2 = 0;

	trace_ext4_punch_hole(inode, offset, length, 0);

	ext4_clear_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA);
	if (ext4_has_inline_data(inode)) {
		down_write(&EXT4_I(inode)->i_mmap_sem);
		ret = ext4_convert_inline_data(inode);
		up_write(&EXT4_I(inode)->i_mmap_sem);
		if (ret)
			return ret;
	}

	/*
	 * Write out all dirty pages to avoid race conditions, then release
	 * them.
	 */
	if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
		ret = filemap_write_and_wait_range(mapping, offset,
						   offset + length - 1);
		if (ret)
			return ret;
	}

	inode_lock(inode);

	/* No need to punch a hole beyond i_size */
	if (offset >= inode->i_size)
		goto out_mutex;

	/*
	 * If the hole extends beyond i_size, set the hole
	 * to end after the page that contains i_size.
	 */
	if (offset + length > inode->i_size) {
		length = inode->i_size +
		   PAGE_SIZE - (inode->i_size & (PAGE_SIZE - 1)) -
		   offset;
	}

	if (offset & (sb->s_blocksize - 1) ||
	    (offset + length) & (sb->s_blocksize - 1)) {
		/*
		 * Attach a jinode to the inode for jbd2 if we do any zeroing
		 * of a partial block.
		 */
		ret = ext4_inode_attach_jinode(inode);
		if (ret < 0)
			goto out_mutex;
	}

	/* Wait for all existing dio workers; newcomers will block on i_mutex */
	inode_dio_wait(inode);

	/*
	 * Prevent page faults from reinstantiating pages we have released
	 * from the page cache.
	 */
	down_write(&EXT4_I(inode)->i_mmap_sem);

	ret = ext4_break_layouts(inode);
	if (ret)
		goto out_dio;

	first_block_offset = round_up(offset, sb->s_blocksize);
	last_block_offset = round_down((offset + length), sb->s_blocksize) - 1;

	/* Now release the pages and zero the block-aligned part of pages */
	if (last_block_offset > first_block_offset) {
		ret = ext4_update_disksize_before_punch(inode, offset, length);
		if (ret)
			goto out_dio;
		truncate_pagecache_range(inode, first_block_offset,
					 last_block_offset);
	}

	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
		credits = ext4_writepage_trans_blocks(inode);
	else
		credits = ext4_blocks_for_truncate(inode);
	handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		ext4_std_error(sb, ret);
		goto out_dio;
	}

	ret = ext4_zero_partial_blocks(handle, inode, offset,
				       length);
	if (ret)
		goto out_stop;

	first_block = (offset + sb->s_blocksize - 1) >>
		EXT4_BLOCK_SIZE_BITS(sb);
	stop_block = (offset + length) >> EXT4_BLOCK_SIZE_BITS(sb);

	/* If there are blocks to remove, do it */
	if (stop_block > first_block) {
		down_write(&EXT4_I(inode)->i_data_sem);
		ext4_discard_preallocations(inode, 0);

		ret = ext4_es_remove_extent(inode, first_block,
					    stop_block - first_block);
		if (ret) {
			up_write(&EXT4_I(inode)->i_data_sem);
			goto out_stop;
		}

		if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
			ret = ext4_ext_remove_space(inode, first_block,
						    stop_block - 1);
		else
			ret = ext4_ind_remove_space(handle, inode, first_block,
						    stop_block);

		up_write(&EXT4_I(inode)->i_data_sem);
	}
	ext4_fc_track_range(handle, inode, first_block, stop_block);
	if (IS_SYNC(inode))
		ext4_handle_sync(handle);

	inode->i_mtime = inode->i_ctime = current_time(inode);
	ret2 = ext4_mark_inode_dirty(handle, inode);
	if (unlikely(ret2))
		ret = ret2;
	if (ret >= 0)
		ext4_update_inode_fsync_trans(handle, inode, 1);
out_stop:
	ext4_journal_stop(handle);
out_dio:
	up_write(&EXT4_I(inode)->i_mmap_sem);
out_mutex:
	inode_unlock(inode);
	return ret;
}

int ext4_inode_attach_jinode(struct inode *inode)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct jbd2_inode *jinode;

	if (ei->jinode || !EXT4_SB(inode->i_sb)->s_journal)
		return 0;

	jinode = jbd2_alloc_inode(GFP_KERNEL);
	spin_lock(&inode->i_lock);
	if (!ei->jinode) {
		if (!jinode) {
			spin_unlock(&inode->i_lock);
			return -ENOMEM;
		}
		ei->jinode = jinode;
		jbd2_journal_init_jbd_inode(ei->jinode, inode);
		jinode = NULL;
	}
	spin_unlock(&inode->i_lock);
	if (unlikely(jinode != NULL))
		jbd2_free_inode(jinode);
	return 0;
}

/*
 * ext4_truncate()
 *
 * We block out ext4_get_block() block instantiations across the entire
 * transaction, and VFS/VM ensures that ext4_truncate() cannot run
 * simultaneously on behalf of the same inode.
 *
 * As we work through the truncate and commit bits of it to the journal
 * there is one core, guiding principle: the file's tree must always be
 * consistent on disk. We must be able to restart the truncate after a
 * crash.
 *
 * The file's tree may be transiently inconsistent in memory (although it
 * probably isn't), but whenever we close off and commit a journal
 * transaction, the contents of (the filesystem + the journal) must be
 * consistent and restartable. It's pretty simple, really: bottom up,
 * right to left (although left-to-right works OK too).
 *
 * Note that at recovery time, journal replay occurs *before* the restart
 * of truncate against the orphan inode list.
 *
 * The committed inode has the new, desired i_size (which is the same as
 * i_disksize in this case). After a crash, ext4_orphan_cleanup() will see
 * that this inode's truncate did not complete and it will again call
 * ext4_truncate() to have another go. So there will be instantiated blocks
 * to the right of the truncation point in a crashed ext4 filesystem. But
 * that's fine - as long as they are linked from the inode, the post-crash
 * ext4_truncate() run will find them and release them.
 */
int ext4_truncate(struct inode *inode)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	unsigned int credits;
	int err = 0, err2;
	handle_t *handle;
	struct address_space *mapping = inode->i_mapping;

	/*
	 * There is a possibility that we're either freeing the inode
	 * or it's a completely new inode. In those cases we might not
	 * have i_mutex locked because it's not necessary.
	 */
	if (!(inode->i_state & (I_NEW|I_FREEING)))
		WARN_ON(!inode_is_locked(inode));
	trace_ext4_truncate_enter(inode);

	if (!ext4_can_truncate(inode))
		goto out_trace;

	if (inode->i_size == 0 && !test_opt(inode->i_sb, NO_AUTO_DA_ALLOC))
		ext4_set_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE);

	if (ext4_has_inline_data(inode)) {
		int has_inline = 1;

		err = ext4_inline_data_truncate(inode, &has_inline);
		if (err || has_inline)
			goto out_trace;
	}

	/* If we zero out the tail of the page, we have to create a jinode for jbd2 */
	if (inode->i_size & (inode->i_sb->s_blocksize - 1)) {
		if (ext4_inode_attach_jinode(inode) < 0)
			goto out_trace;
	}

	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
		credits = ext4_writepage_trans_blocks(inode);
	else
		credits = ext4_blocks_for_truncate(inode);

	handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits);
	if (IS_ERR(handle)) {
		err = PTR_ERR(handle);
		goto out_trace;
	}

	if (inode->i_size & (inode->i_sb->s_blocksize - 1))
		ext4_block_truncate_page(handle, mapping, inode->i_size);

	/*
	 * We add the inode to the orphan list, so that if this
	 * truncate spans multiple transactions, and we crash, we will
	 * resume the truncate when the filesystem recovers. It also
	 * marks the inode dirty, to catch the new size.
	 *
	 * Implication: the file must always be in a sane, consistent
	 * truncatable state while each transaction commits.
	 */
	err = ext4_orphan_add(handle, inode);
	if (err)
		goto out_stop;

	down_write(&EXT4_I(inode)->i_data_sem);

	ext4_discard_preallocations(inode, 0);

	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
		err = ext4_ext_truncate(handle, inode);
	else
		ext4_ind_truncate(handle, inode);

	up_write(&ei->i_data_sem);
	if (err)
		goto out_stop;

	if (IS_SYNC(inode))
		ext4_handle_sync(handle);

out_stop:
	/*
	 * If this was a simple ftruncate() and the file will remain alive,
	 * then we need to clear up the orphan record which we created above.
	 * However, if this was a real unlink then we were called by
	 * ext4_evict_inode(), and we allow that function to clean up the
	 * orphan info for us.
	 */
	if (inode->i_nlink)
		ext4_orphan_del(handle, inode);

	inode->i_mtime = inode->i_ctime = current_time(inode);
	err2 = ext4_mark_inode_dirty(handle, inode);
	if (unlikely(err2 && !err))
		err = err2;
	ext4_journal_stop(handle);

out_trace:
	trace_ext4_truncate_exit(inode);
	return err;
}

/*
 * ext4_get_inode_loc returns with an extra refcount against the inode's
 * underlying buffer_head on success. If 'in_mem' is true, we have all
 * data in memory that is needed to recreate the on-disk version of this
 * inode.
 */
static int __ext4_get_inode_loc(struct super_block *sb, unsigned long ino,
				struct ext4_iloc *iloc, int in_mem,
				ext4_fsblk_t *ret_block)
{
	struct ext4_group_desc *gdp;
	struct buffer_head *bh;
	ext4_fsblk_t block;
	struct blk_plug plug;
	int inodes_per_block, inode_offset;

	iloc->bh = NULL;
	if (ino < EXT4_ROOT_INO ||
	    ino > le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count))
		return -EFSCORRUPTED;

	iloc->block_group = (ino - 1) / EXT4_INODES_PER_GROUP(sb);
	gdp = ext4_get_group_desc(sb, iloc->block_group, NULL);
	if (!gdp)
		return -EIO;

	/*
	 * Figure out the offset within the block group inode table
	 */
	inodes_per_block = EXT4_SB(sb)->s_inodes_per_block;
	inode_offset = ((ino - 1) %
			EXT4_INODES_PER_GROUP(sb));
	block = ext4_inode_table(sb, gdp) + (inode_offset / inodes_per_block);
	iloc->offset = (inode_offset % inodes_per_block) * EXT4_INODE_SIZE(sb);

	bh = sb_getblk(sb, block);
	if (unlikely(!bh))
		return -ENOMEM;
	if (ext4_simulate_fail(sb, EXT4_SIM_INODE_EIO))
		goto simulate_eio;
	if (!buffer_uptodate(bh)) {
		lock_buffer(bh);

		if (ext4_buffer_uptodate(bh)) {
			/* someone brought it uptodate while we waited */
			unlock_buffer(bh);
			goto has_buffer;
		}

		/*
		 * If we have all information of the inode in memory and this
		 * is the only valid inode in the block, we need not read the
		 * block.
		 */
		if (in_mem) {
			struct buffer_head *bitmap_bh;
			int i, start;

			start = inode_offset & ~(inodes_per_block - 1);

			/* Is the inode bitmap in cache? */
			bitmap_bh = sb_getblk(sb, ext4_inode_bitmap(sb, gdp));
			if (unlikely(!bitmap_bh))
				goto make_io;

			/*
			 * If the inode bitmap isn't in cache then the
			 * optimisation may end up performing two reads instead
			 * of one, so skip it.
			 */
			if (!buffer_uptodate(bitmap_bh)) {
				brelse(bitmap_bh);
				goto make_io;
			}
			for (i = start; i < start + inodes_per_block; i++) {
				if (i == inode_offset)
					continue;
				if (ext4_test_bit(i, bitmap_bh->b_data))
					break;
			}
			brelse(bitmap_bh);
			if (i == start + inodes_per_block) {
				/* all other inodes are free, so skip I/O */
				memset(bh->b_data, 0, bh->b_size);
				set_buffer_uptodate(bh);
				unlock_buffer(bh);
				goto has_buffer;
			}
		}

make_io:
		/*
		 * If we need to do any I/O, try to pre-readahead extra
		 * blocks from the inode table.
		 */
		blk_start_plug(&plug);
		if (EXT4_SB(sb)->s_inode_readahead_blks) {
			ext4_fsblk_t b, end, table;
			unsigned num;
			__u32 ra_blks = EXT4_SB(sb)->s_inode_readahead_blks;

			table = ext4_inode_table(sb, gdp);
			/* s_inode_readahead_blks is always a power of 2 */
			b = block & ~((ext4_fsblk_t) ra_blks - 1);
			if (table > b)
				b = table;
			end = b + ra_blks;
			num = EXT4_INODES_PER_GROUP(sb);
			if (ext4_has_group_desc_csum(sb))
				num -= ext4_itable_unused_count(sb, gdp);
			table += num / inodes_per_block;
			if (end > table)
				end = table;
			while (b <= end)
				ext4_sb_breadahead_unmovable(sb, b++);
		}

		/*
		 * There are other valid inodes in the buffer, this inode
		 * has in-inode xattrs, or we don't have this inode in memory.
		 * Read the block from disk.
		 */
		trace_ext4_load_inode(sb, ino);
		ext4_read_bh_nowait(bh, REQ_META | REQ_PRIO, NULL);
		blk_finish_plug(&plug);
		wait_on_buffer(bh);
		if (!buffer_uptodate(bh)) {
		simulate_eio:
			if (ret_block)
				*ret_block = block;
			brelse(bh);
			return -EIO;
		}
	}
has_buffer:
	iloc->bh = bh;
	return 0;
}
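
/*
 * Worked example (hypothetical geometry): with 8192 inodes per group,
 * 256-byte inodes and 4KiB blocks (16 inodes per block), ino == 8200
 * lands in block_group == (8200 - 1) / 8192 == 1 with inode_offset ==
 * 8199 % 8192 == 7, so the inode lives in that group's first inode-table
 * block (7 / 16 == 0) at byte offset 7 * 256 == 1792.
 */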

static int __ext4_get_inode_loc_noinmem(struct inode *inode,
					struct ext4_iloc *iloc)
{
	ext4_fsblk_t err_blk;
	int ret;

	ret = __ext4_get_inode_loc(inode->i_sb, inode->i_ino, iloc, 0,
				   &err_blk);

	if (ret == -EIO)
		ext4_error_inode_block(inode, err_blk, EIO,
				       "unable to read itable block");

	return ret;
}

int ext4_get_inode_loc(struct inode *inode, struct ext4_iloc *iloc)
{
	ext4_fsblk_t err_blk;
	int ret;

	/* We have all inode data except xattrs in memory here. */
	ret = __ext4_get_inode_loc(inode->i_sb, inode->i_ino, iloc,
		!ext4_test_inode_state(inode, EXT4_STATE_XATTR), &err_blk);

	if (ret == -EIO)
		ext4_error_inode_block(inode, err_blk, EIO,
				       "unable to read itable block");

	return ret;
}


int ext4_get_fc_inode_loc(struct super_block *sb, unsigned long ino,
			  struct ext4_iloc *iloc)
{
	return __ext4_get_inode_loc(sb, ino, iloc, 0, NULL);
}

static bool ext4_should_enable_dax(struct inode *inode)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);

	if (test_opt2(inode->i_sb, DAX_NEVER))
		return false;
	if (!S_ISREG(inode->i_mode))
		return false;
	if (ext4_should_journal_data(inode))
		return false;
	if (ext4_has_inline_data(inode))
		return false;
	if (ext4_test_inode_flag(inode, EXT4_INODE_ENCRYPT))
		return false;
	if (ext4_test_inode_flag(inode, EXT4_INODE_VERITY))
		return false;
	if (!test_bit(EXT4_FLAGS_BDEV_IS_DAX, &sbi->s_ext4_flags))
		return false;
	if (test_opt(inode->i_sb, DAX_ALWAYS))
		return true;

	return ext4_test_inode_flag(inode, EXT4_INODE_DAX);
}

void ext4_set_inode_flags(struct inode *inode, bool init)
{
	unsigned int flags = EXT4_I(inode)->i_flags;
	unsigned int new_fl = 0;

	WARN_ON_ONCE(IS_DAX(inode) && init);

	if (flags & EXT4_SYNC_FL)
		new_fl |= S_SYNC;
	if (flags & EXT4_APPEND_FL)
		new_fl |= S_APPEND;
	if (flags & EXT4_IMMUTABLE_FL)
		new_fl |= S_IMMUTABLE;
	if (flags & EXT4_NOATIME_FL)
		new_fl |= S_NOATIME;
	if (flags & EXT4_DIRSYNC_FL)
		new_fl |= S_DIRSYNC;

	/* Because of the way inode_set_flags() works we must preserve S_DAX
	 * here if already set. */
	new_fl |= (inode->i_flags & S_DAX);
	if (init && ext4_should_enable_dax(inode))
		new_fl |= S_DAX;

	if (flags & EXT4_ENCRYPT_FL)
		new_fl |= S_ENCRYPTED;
	if (flags & EXT4_CASEFOLD_FL)
		new_fl |= S_CASEFOLD;
	if (flags & EXT4_VERITY_FL)
		new_fl |= S_VERITY;
	inode_set_flags(inode, new_fl,
			S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC|S_DAX|
			S_ENCRYPTED|S_CASEFOLD|S_VERITY);
}

static blkcnt_t ext4_inode_blocks(struct ext4_inode *raw_inode,
				  struct ext4_inode_info *ei)
{
	blkcnt_t i_blocks;
	struct inode *inode = &(ei->vfs_inode);
	struct super_block *sb = inode->i_sb;

	if (ext4_has_feature_huge_file(sb)) {
		/* we are using the combined 48 bit field */
		i_blocks = ((u64)le16_to_cpu(raw_inode->i_blocks_high)) << 32 |
			   le32_to_cpu(raw_inode->i_blocks_lo);
		if (ext4_test_inode_flag(inode, EXT4_INODE_HUGE_FILE)) {
			/* i_blocks is in units of the file system block size */
			return i_blocks << (inode->i_blkbits - 9);
		} else {
			return i_blocks;
		}
	} else {
		return le32_to_cpu(raw_inode->i_blocks_lo);
	}
}
Jan Karaff9ddf72007-07-18 09:24:20 -04004555
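/*
 * Probe the space past the fixed EXT4_GOOD_OLD_INODE_SIZE part of a
 * freshly read inode: if it starts with the in-inode xattr magic, flag
 * the inode as carrying extended attributes and look for inline data.
 */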
static inline int ext4_iget_extra_inode(struct inode *inode,
					struct ext4_inode *raw_inode,
					struct ext4_inode_info *ei)
{
	__le32 *magic = (void *)raw_inode +
			EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize;

	if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize + sizeof(__le32) <=
	    EXT4_INODE_SIZE(inode->i_sb) &&
	    *magic == cpu_to_le32(EXT4_XATTR_MAGIC)) {
		ext4_set_inode_state(inode, EXT4_STATE_XATTR);
		return ext4_find_inline_data_nolock(inode);
	} else
		EXT4_I(inode)->i_inline_off = 0;
	return 0;
}

int ext4_get_projid(struct inode *inode, kprojid_t *projid)
{
	if (!ext4_has_feature_project(inode->i_sb))
		return -EOPNOTSUPP;
	*projid = EXT4_I(inode)->i_projid;
	return 0;
}

/*
 * ext4 has a self-managed i_version for ea inodes: it stores the lower 32 bits
 * of the refcount in i_version, so use the raw values if the inode has the
 * EXT4_EA_INODE_FL flag set.
 */
static inline void ext4_inode_set_iversion_queried(struct inode *inode, u64 val)
{
	if (unlikely(EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL))
		inode_set_iversion_raw(inode, val);
	else
		inode_set_iversion_queried(inode, val);
}
static inline u64 ext4_inode_peek_iversion(const struct inode *inode)
{
	if (unlikely(EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL))
		return inode_peek_iversion_raw(inode);
	else
		return inode_peek_iversion(inode);
}

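/*
 * Read an inode from disk and instantiate the in-core VFS inode. Raw
 * fields are validated as they are copied in (inode number range,
 * extra_isize, checksum, i_size, xattr block, block references, i_mode),
 * so a corrupted on-disk inode is rejected here with -EFSCORRUPTED or
 * -EFSBADCRC instead of leaking bad values into the rest of the kernel.
 */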
struct inode *__ext4_iget(struct super_block *sb, unsigned long ino,
			  ext4_iget_flags flags, const char *function,
			  unsigned int line)
{
	struct ext4_iloc iloc;
	struct ext4_inode *raw_inode;
	struct ext4_inode_info *ei;
	struct inode *inode;
	journal_t *journal = EXT4_SB(sb)->s_journal;
	long ret;
	loff_t size;
	int block;
	uid_t i_uid;
	gid_t i_gid;
	projid_t i_projid;

	if ((!(flags & EXT4_IGET_SPECIAL) &&
	     (ino < EXT4_FIRST_INO(sb) && ino != EXT4_ROOT_INO)) ||
	    (ino < EXT4_ROOT_INO) ||
	    (ino > le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count))) {
		if (flags & EXT4_IGET_HANDLE)
			return ERR_PTR(-ESTALE);
		__ext4_error(sb, function, line, false, EFSCORRUPTED, 0,
			     "inode #%lu: comm %s: iget: illegal inode #",
			     ino, current->comm);
		return ERR_PTR(-EFSCORRUPTED);
	}

	inode = iget_locked(sb, ino);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;

	ei = EXT4_I(inode);
	iloc.bh = NULL;

	ret = __ext4_get_inode_loc_noinmem(inode, &iloc);
	if (ret < 0)
		goto bad_inode;
	raw_inode = ext4_raw_inode(&iloc);

	if ((ino == EXT4_ROOT_INO) && (raw_inode->i_links_count == 0)) {
		ext4_error_inode(inode, function, line, 0,
				 "iget: root inode unallocated");
		ret = -EFSCORRUPTED;
		goto bad_inode;
	}

	if ((flags & EXT4_IGET_HANDLE) &&
	    (raw_inode->i_links_count == 0) && (raw_inode->i_mode == 0)) {
		ret = -ESTALE;
		goto bad_inode;
	}

	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
		ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize);
		if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize >
			EXT4_INODE_SIZE(inode->i_sb) ||
		    (ei->i_extra_isize & 3)) {
			ext4_error_inode(inode, function, line, 0,
					 "iget: bad extra_isize %u "
					 "(inode size %u)",
					 ei->i_extra_isize,
					 EXT4_INODE_SIZE(inode->i_sb));
			ret = -EFSCORRUPTED;
			goto bad_inode;
		}
	} else
		ei->i_extra_isize = 0;

	/* Precompute checksum seed for inode metadata */
	if (ext4_has_metadata_csum(sb)) {
		struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
		__u32 csum;
		__le32 inum = cpu_to_le32(inode->i_ino);
		__le32 gen = raw_inode->i_generation;
		csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&inum,
				   sizeof(inum));
		ei->i_csum_seed = ext4_chksum(sbi, csum, (__u8 *)&gen,
					      sizeof(gen));
	}

	if ((!ext4_inode_csum_verify(inode, raw_inode, ei) ||
	     ext4_simulate_fail(sb, EXT4_SIM_INODE_CRC)) &&
	    (!(EXT4_SB(sb)->s_mount_state & EXT4_FC_REPLAY))) {
		ext4_error_inode_err(inode, function, line, 0,
				EFSBADCRC, "iget: checksum invalid");
		ret = -EFSBADCRC;
		goto bad_inode;
	}

	inode->i_mode = le16_to_cpu(raw_inode->i_mode);
	i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low);
	i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low);
	if (ext4_has_feature_project(sb) &&
	    EXT4_INODE_SIZE(sb) > EXT4_GOOD_OLD_INODE_SIZE &&
	    EXT4_FITS_IN_INODE(raw_inode, ei, i_projid))
		i_projid = (projid_t)le32_to_cpu(raw_inode->i_projid);
	else
		i_projid = EXT4_DEF_PROJID;

	if (!(test_opt(inode->i_sb, NO_UID32))) {
		i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16;
		i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16;
	}
	i_uid_write(inode, i_uid);
	i_gid_write(inode, i_gid);
	ei->i_projid = make_kprojid(&init_user_ns, i_projid);
	set_nlink(inode, le16_to_cpu(raw_inode->i_links_count));

	ext4_clear_state_flags(ei);	/* Only relevant on 32-bit archs */
	ei->i_inline_off = 0;
	ei->i_dir_start_lookup = 0;
	ei->i_dtime = le32_to_cpu(raw_inode->i_dtime);
	/* We now have enough fields to check if the inode was active or not.
	 * This is needed because nfsd might try to access dead inodes;
	 * the test is the same one that e2fsck uses.
	 * NeilBrown 1999oct15
	 */
	if (inode->i_nlink == 0) {
		if ((inode->i_mode == 0 ||
		     !(EXT4_SB(inode->i_sb)->s_mount_state & EXT4_ORPHAN_FS)) &&
		    ino != EXT4_BOOT_LOADER_INO) {
			/* this inode is deleted */
			ret = -ESTALE;
			goto bad_inode;
		}
		/* The only unlinked inodes we let through here have
		 * valid i_mode and are being read by the orphan
		 * recovery code: that's fine, we're about to complete
		 * the process of deleting those.
		 * OR it is the EXT4_BOOT_LOADER_INO which is
		 * not initialized on a new filesystem. */
	}
	ei->i_flags = le32_to_cpu(raw_inode->i_flags);
	ext4_set_inode_flags(inode, true);
	inode->i_blocks = ext4_inode_blocks(raw_inode, ei);
	ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl_lo);
	if (ext4_has_feature_64bit(sb))
		ei->i_file_acl |=
			((__u64)le16_to_cpu(raw_inode->i_file_acl_high)) << 32;
	inode->i_size = ext4_isize(sb, raw_inode);
	if ((size = i_size_read(inode)) < 0) {
		ext4_error_inode(inode, function, line, 0,
				 "iget: bad i_size value: %lld", size);
		ret = -EFSCORRUPTED;
		goto bad_inode;
	}
	/*
	 * If dir_index is not enabled but there's a dir with the INDEX flag
	 * set, we'd normally treat htree data as empty space. But with
	 * metadata checksumming that corrupts checksums so forbid that.
	 */
	if (!ext4_has_feature_dir_index(sb) && ext4_has_metadata_csum(sb) &&
	    ext4_test_inode_flag(inode, EXT4_INODE_INDEX)) {
		ext4_error_inode(inode, function, line, 0,
			 "iget: Dir with htree data on filesystem without dir_index feature.");
		ret = -EFSCORRUPTED;
		goto bad_inode;
	}
	ei->i_disksize = inode->i_size;
#ifdef CONFIG_QUOTA
	ei->i_reserved_quota = 0;
#endif
	inode->i_generation = le32_to_cpu(raw_inode->i_generation);
	ei->i_block_group = iloc.block_group;
	ei->i_last_alloc_group = ~0;
	/*
	 * NOTE! The in-memory inode i_data array is in little-endian order
	 * even on big-endian machines: we do NOT byteswap the block numbers!
	 */
	for (block = 0; block < EXT4_N_BLOCKS; block++)
		ei->i_data[block] = raw_inode->i_block[block];
	INIT_LIST_HEAD(&ei->i_orphan);
	ext4_fc_init_inode(&ei->vfs_inode);

	/*
	 * Set the transaction IDs of transactions that have to be committed
	 * to finish f[data]sync. We set them to the currently running
	 * transaction as we cannot be sure that the inode or some of its
	 * metadata isn't part of the transaction - the inode could have been
	 * reclaimed and now it is reread from disk.
	 */
	if (journal) {
		transaction_t *transaction;
		tid_t tid;

		read_lock(&journal->j_state_lock);
		if (journal->j_running_transaction)
			transaction = journal->j_running_transaction;
		else
			transaction = journal->j_committing_transaction;
		if (transaction)
			tid = transaction->t_tid;
		else
			tid = journal->j_commit_sequence;
		read_unlock(&journal->j_state_lock);
		ei->i_sync_tid = tid;
		ei->i_datasync_tid = tid;
	}

	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
		if (ei->i_extra_isize == 0) {
			/* The extra space is currently unused. Use it. */
			BUILD_BUG_ON(sizeof(struct ext4_inode) & 3);
			ei->i_extra_isize = sizeof(struct ext4_inode) -
					    EXT4_GOOD_OLD_INODE_SIZE;
		} else {
			ret = ext4_iget_extra_inode(inode, raw_inode, ei);
			if (ret)
				goto bad_inode;
		}
	}

	EXT4_INODE_GET_XTIME(i_ctime, inode, raw_inode);
	EXT4_INODE_GET_XTIME(i_mtime, inode, raw_inode);
	EXT4_INODE_GET_XTIME(i_atime, inode, raw_inode);
	EXT4_EINODE_GET_XTIME(i_crtime, ei, raw_inode);

	if (likely(!test_opt2(inode->i_sb, HURD_COMPAT))) {
		u64 ivers = le32_to_cpu(raw_inode->i_disk_version);

		if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
			if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi))
				ivers |=
		    (__u64)(le32_to_cpu(raw_inode->i_version_hi)) << 32;
		}
		ext4_inode_set_iversion_queried(inode, ivers);
	}

	ret = 0;
	if (ei->i_file_acl &&
	    !ext4_inode_block_valid(inode, ei->i_file_acl, 1)) {
		ext4_error_inode(inode, function, line, 0,
				 "iget: bad extended attribute block %llu",
				 ei->i_file_acl);
		ret = -EFSCORRUPTED;
		goto bad_inode;
	} else if (!ext4_has_inline_data(inode)) {
		/* validate the block references in the inode */
		if (!(EXT4_SB(sb)->s_mount_state & EXT4_FC_REPLAY) &&
		   (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
		   (S_ISLNK(inode->i_mode) &&
		   !ext4_inode_is_fast_symlink(inode)))) {
			if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
				ret = ext4_ext_check_inode(inode);
			else
				ret = ext4_ind_check_inode(inode);
		}
	}
	if (ret)
		goto bad_inode;

	if (S_ISREG(inode->i_mode)) {
		inode->i_op = &ext4_file_inode_operations;
		inode->i_fop = &ext4_file_operations;
		ext4_set_aops(inode);
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &ext4_dir_inode_operations;
		inode->i_fop = &ext4_dir_operations;
	} else if (S_ISLNK(inode->i_mode)) {
		/* VFS does not allow setting these so must be corruption */
		if (IS_APPEND(inode) || IS_IMMUTABLE(inode)) {
			ext4_error_inode(inode, function, line, 0,
					 "iget: immutable or append flags "
					 "not allowed on symlinks");
			ret = -EFSCORRUPTED;
			goto bad_inode;
		}
		if (IS_ENCRYPTED(inode)) {
			inode->i_op = &ext4_encrypted_symlink_inode_operations;
			ext4_set_aops(inode);
		} else if (ext4_inode_is_fast_symlink(inode)) {
			inode->i_link = (char *)ei->i_data;
			inode->i_op = &ext4_fast_symlink_inode_operations;
			nd_terminate_link(ei->i_data, inode->i_size,
				sizeof(ei->i_data) - 1);
		} else {
			inode->i_op = &ext4_symlink_inode_operations;
			ext4_set_aops(inode);
		}
		inode_nohighmem(inode);
	} else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
	      S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
		inode->i_op = &ext4_special_inode_operations;
		if (raw_inode->i_block[0])
			init_special_inode(inode, inode->i_mode,
			   old_decode_dev(le32_to_cpu(raw_inode->i_block[0])));
		else
			init_special_inode(inode, inode->i_mode,
			   new_decode_dev(le32_to_cpu(raw_inode->i_block[1])));
	} else if (ino == EXT4_BOOT_LOADER_INO) {
		make_bad_inode(inode);
	} else {
		ret = -EFSCORRUPTED;
		ext4_error_inode(inode, function, line, 0,
				 "iget: bogus i_mode (%o)", inode->i_mode);
		goto bad_inode;
	}
	if (IS_CASEFOLDED(inode) && !ext4_has_feature_casefold(inode->i_sb))
		ext4_error_inode(inode, function, line, 0,
				 "casefold flag without casefold feature");
	brelse(iloc.bh);

	unlock_new_inode(inode);
	return inode;

bad_inode:
	brelse(iloc.bh);
	iget_failed(inode);
	return ERR_PTR(ret);
}

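/*
 * Encode inode->i_blocks into the on-disk fields; the inverse of
 * ext4_inode_blocks() above. Counts up to 2^32 - 1 sectors (just under
 * 2TB in 512-byte units) fit in i_blocks_lo alone; up to 2^48 - 1
 * sectors additionally need huge_file and i_blocks_high; anything
 * larger is stored in filesystem block units with EXT4_INODE_HUGE_FILE
 * set on the inode.
 */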
static int ext4_inode_blocks_set(handle_t *handle,
				struct ext4_inode *raw_inode,
				struct ext4_inode_info *ei)
{
	struct inode *inode = &(ei->vfs_inode);
	u64 i_blocks = READ_ONCE(inode->i_blocks);
	struct super_block *sb = inode->i_sb;

	if (i_blocks <= ~0U) {
		/*
		 * i_blocks can be represented in a 32 bit variable
		 * as a multiple of 512 bytes
		 */
		raw_inode->i_blocks_lo   = cpu_to_le32(i_blocks);
		raw_inode->i_blocks_high = 0;
		ext4_clear_inode_flag(inode, EXT4_INODE_HUGE_FILE);
		return 0;
	}
	if (!ext4_has_feature_huge_file(sb))
		return -EFBIG;

	if (i_blocks <= 0xffffffffffffULL) {
		/*
		 * i_blocks can be represented in a 48 bit variable
		 * as a multiple of 512 bytes
		 */
		raw_inode->i_blocks_lo   = cpu_to_le32(i_blocks);
		raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32);
		ext4_clear_inode_flag(inode, EXT4_INODE_HUGE_FILE);
	} else {
		ext4_set_inode_flag(inode, EXT4_INODE_HUGE_FILE);
		/* i_blocks is stored in file system block size */
		i_blocks = i_blocks >> (inode->i_blkbits - 9);
		raw_inode->i_blocks_lo   = cpu_to_le32(i_blocks);
		raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32);
	}
	return 0;
}

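/*
 * If @ino is cached and dirty only because of a timestamp update, copy
 * its in-core timestamps into @raw_inode so that they ride along with
 * the current inode table write-out. The lockless
 * inode_is_dirtytime_only() check is repeated under i_lock before the
 * I_DIRTY_TIME state is actually cleared.
 */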
static void __ext4_update_other_inode_time(struct super_block *sb,
					   unsigned long orig_ino,
					   unsigned long ino,
					   struct ext4_inode *raw_inode)
{
	struct inode *inode;

	inode = find_inode_by_ino_rcu(sb, ino);
	if (!inode)
		return;

	if (!inode_is_dirtytime_only(inode))
		return;

	spin_lock(&inode->i_lock);
	if (inode_is_dirtytime_only(inode)) {
		struct ext4_inode_info *ei = EXT4_I(inode);

		inode->i_state &= ~I_DIRTY_TIME;
		spin_unlock(&inode->i_lock);

		spin_lock(&ei->i_raw_lock);
		EXT4_INODE_SET_XTIME(i_ctime, inode, raw_inode);
		EXT4_INODE_SET_XTIME(i_mtime, inode, raw_inode);
		EXT4_INODE_SET_XTIME(i_atime, inode, raw_inode);
		ext4_inode_csum_set(inode, raw_inode, ei);
		spin_unlock(&ei->i_raw_lock);
		trace_ext4_other_inode_update_time(inode, orig_ino);
		return;
	}
	spin_unlock(&inode->i_lock);
}

/*
 * Opportunistically update the other time fields for other inodes in
 * the same inode table block.
 */
static void ext4_update_other_inodes_time(struct super_block *sb,
					  unsigned long orig_ino, char *buf)
{
	unsigned long ino;
	int i, inodes_per_block = EXT4_SB(sb)->s_inodes_per_block;
	int inode_size = EXT4_INODE_SIZE(sb);

	/*
	 * Calculate the first inode in the inode table block. Inode
	 * numbers are one-based. That is, the first inode in a block
	 * (assuming 4k blocks and 256 byte inodes) is (n*16 + 1).
	 */
	ino = ((orig_ino - 1) & ~(inodes_per_block - 1)) + 1;
	rcu_read_lock();
	for (i = 0; i < inodes_per_block; i++, ino++, buf += inode_size) {
		if (ino == orig_ino)
			continue;
		__ext4_update_other_inode_time(sb, orig_ino, ino,
					       (struct ext4_inode *)buf);
	}
	rcu_read_unlock();
}

/*
 * Post the struct inode info into an on-disk inode location in the
 * buffer-cache. This gobbles the caller's reference to the
 * buffer_head in the inode location struct.
 *
 * The caller must have write access to iloc->bh.
 */
static int ext4_do_update_inode(handle_t *handle,
				struct inode *inode,
				struct ext4_iloc *iloc)
{
	struct ext4_inode *raw_inode = ext4_raw_inode(iloc);
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct buffer_head *bh = iloc->bh;
	struct super_block *sb = inode->i_sb;
	int err = 0, block;
	int need_datasync = 0, set_large_file = 0;
	uid_t i_uid;
	gid_t i_gid;
	projid_t i_projid;

	spin_lock(&ei->i_raw_lock);

	/* For fields not tracked in the in-memory inode,
	 * initialise them to zero for new inodes. */
	if (ext4_test_inode_state(inode, EXT4_STATE_NEW))
		memset(raw_inode, 0, EXT4_SB(inode->i_sb)->s_inode_size);

	err = ext4_inode_blocks_set(handle, raw_inode, ei);
	if (err) {
		spin_unlock(&ei->i_raw_lock);
		goto out_brelse;
	}

	raw_inode->i_mode = cpu_to_le16(inode->i_mode);
	i_uid = i_uid_read(inode);
	i_gid = i_gid_read(inode);
	i_projid = from_kprojid(&init_user_ns, ei->i_projid);
	if (!(test_opt(inode->i_sb, NO_UID32))) {
		raw_inode->i_uid_low = cpu_to_le16(low_16_bits(i_uid));
		raw_inode->i_gid_low = cpu_to_le16(low_16_bits(i_gid));
/*
 * Fix up interoperability with old kernels. Otherwise, old inodes get
 * re-used with the upper 16 bits of the uid/gid intact
 */
		if (ei->i_dtime && list_empty(&ei->i_orphan)) {
			raw_inode->i_uid_high = 0;
			raw_inode->i_gid_high = 0;
		} else {
			raw_inode->i_uid_high =
				cpu_to_le16(high_16_bits(i_uid));
			raw_inode->i_gid_high =
				cpu_to_le16(high_16_bits(i_gid));
		}
	} else {
		raw_inode->i_uid_low = cpu_to_le16(fs_high2lowuid(i_uid));
		raw_inode->i_gid_low = cpu_to_le16(fs_high2lowgid(i_gid));
		raw_inode->i_uid_high = 0;
		raw_inode->i_gid_high = 0;
	}
	raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);

	EXT4_INODE_SET_XTIME(i_ctime, inode, raw_inode);
	EXT4_INODE_SET_XTIME(i_mtime, inode, raw_inode);
	EXT4_INODE_SET_XTIME(i_atime, inode, raw_inode);
	EXT4_EINODE_SET_XTIME(i_crtime, ei, raw_inode);

	raw_inode->i_dtime = cpu_to_le32(ei->i_dtime);
	raw_inode->i_flags = cpu_to_le32(ei->i_flags & 0xFFFFFFFF);
	if (likely(!test_opt2(inode->i_sb, HURD_COMPAT)))
		raw_inode->i_file_acl_high =
			cpu_to_le16(ei->i_file_acl >> 32);
	raw_inode->i_file_acl_lo = cpu_to_le32(ei->i_file_acl);
	if (READ_ONCE(ei->i_disksize) != ext4_isize(inode->i_sb, raw_inode)) {
		ext4_isize_set(raw_inode, ei->i_disksize);
		need_datasync = 1;
	}
	if (ei->i_disksize > 0x7fffffffULL) {
		if (!ext4_has_feature_large_file(sb) ||
				EXT4_SB(sb)->s_es->s_rev_level ==
		    cpu_to_le32(EXT4_GOOD_OLD_REV))
			set_large_file = 1;
	}
	raw_inode->i_generation = cpu_to_le32(inode->i_generation);
	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
		if (old_valid_dev(inode->i_rdev)) {
			raw_inode->i_block[0] =
				cpu_to_le32(old_encode_dev(inode->i_rdev));
			raw_inode->i_block[1] = 0;
		} else {
			raw_inode->i_block[0] = 0;
			raw_inode->i_block[1] =
				cpu_to_le32(new_encode_dev(inode->i_rdev));
			raw_inode->i_block[2] = 0;
		}
	} else if (!ext4_has_inline_data(inode)) {
		for (block = 0; block < EXT4_N_BLOCKS; block++)
			raw_inode->i_block[block] = ei->i_data[block];
	}

	if (likely(!test_opt2(inode->i_sb, HURD_COMPAT))) {
		u64 ivers = ext4_inode_peek_iversion(inode);

		raw_inode->i_disk_version = cpu_to_le32(ivers);
		if (ei->i_extra_isize) {
			if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi))
				raw_inode->i_version_hi =
					cpu_to_le32(ivers >> 32);
			raw_inode->i_extra_isize =
				cpu_to_le16(ei->i_extra_isize);
		}
	}

	BUG_ON(!ext4_has_feature_project(inode->i_sb) &&
	       i_projid != EXT4_DEF_PROJID);

	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
	    EXT4_FITS_IN_INODE(raw_inode, ei, i_projid))
		raw_inode->i_projid = cpu_to_le32(i_projid);

	ext4_inode_csum_set(inode, raw_inode, ei);
	spin_unlock(&ei->i_raw_lock);
	if (inode->i_sb->s_flags & SB_LAZYTIME)
		ext4_update_other_inodes_time(inode->i_sb, inode->i_ino,
					      bh->b_data);

	BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
	err = ext4_handle_dirty_metadata(handle, NULL, bh);
	if (err)
		goto out_brelse;
	ext4_clear_inode_state(inode, EXT4_STATE_NEW);
	if (set_large_file) {
		BUFFER_TRACE(EXT4_SB(sb)->s_sbh, "get write access");
		err = ext4_journal_get_write_access(handle, EXT4_SB(sb)->s_sbh);
		if (err)
			goto out_brelse;
		lock_buffer(EXT4_SB(sb)->s_sbh);
		ext4_set_feature_large_file(sb);
		ext4_superblock_csum_set(sb);
		unlock_buffer(EXT4_SB(sb)->s_sbh);
		ext4_handle_sync(handle);
		err = ext4_handle_dirty_metadata(handle, NULL,
						 EXT4_SB(sb)->s_sbh);
	}
	ext4_update_inode_fsync_trans(handle, inode, need_datasync);
out_brelse:
	brelse(bh);
	ext4_std_error(inode->i_sb, err);
	return err;
}

/*
 * ext4_write_inode()
 *
 * We are called from a few places:
 *
 * - Within generic_file_aio_write() -> generic_write_sync() for O_SYNC files.
 *   Here, there will be no transaction running. We wait for any running
 *   transaction to commit.
 *
 * - Within flush work (sys_sync(), kupdate and such).
 *   We wait on commit, if told to.
 *
 * - Within iput_final() -> write_inode_now()
 *   We wait on commit, if told to.
 *
 * In all cases it is actually safe for us to return without doing anything,
 * because the inode has been copied into a raw inode buffer in
 * ext4_mark_inode_dirty(). This is a correctness thing for WB_SYNC_ALL
 * writeback.
 *
 * Note that we are absolutely dependent upon all inode dirtiers doing the
 * right thing: they *must* call mark_inode_dirty() after dirtying info in
 * which we are interested.
 *
 * It would be a bug for them to not do this. The code:
 *
 *	mark_inode_dirty(inode)
 *	stuff();
 *	inode->i_size = expr;
 *
 * is in error because write_inode() could occur while `stuff()' is running,
 * and the new i_size will be lost. Plus the inode will no longer be on the
 * superblock's dirty inode list.
 */
int ext4_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	int err;

	if (WARN_ON_ONCE(current->flags & PF_MEMALLOC) ||
	    sb_rdonly(inode->i_sb))
		return 0;

	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
		return -EIO;

	if (EXT4_SB(inode->i_sb)->s_journal) {
		if (ext4_journal_current_handle()) {
			jbd_debug(1, "called recursively, non-PF_MEMALLOC!\n");
			dump_stack();
			return -EIO;
		}

		/*
		 * No need to force transaction in WB_SYNC_NONE mode. Also
		 * ext4_sync_fs() will force the commit after everything is
		 * written.
		 */
		if (wbc->sync_mode != WB_SYNC_ALL || wbc->for_sync)
			return 0;

		err = ext4_fc_commit(EXT4_SB(inode->i_sb)->s_journal,
				     EXT4_I(inode)->i_sync_tid);
	} else {
		struct ext4_iloc iloc;

		err = __ext4_get_inode_loc_noinmem(inode, &iloc);
		if (err)
			return err;
		/*
		 * sync(2) will flush the whole buffer cache. No need to do
		 * it here separately for each inode.
		 */
		if (wbc->sync_mode == WB_SYNC_ALL && !wbc->for_sync)
			sync_dirty_buffer(iloc.bh);
		if (buffer_req(iloc.bh) && !buffer_uptodate(iloc.bh)) {
			ext4_error_inode_block(inode, iloc.bh->b_blocknr, EIO,
					       "IO error syncing inode");
			err = -EIO;
		}
		brelse(iloc.bh);
	}
	return err;
}

/*
 * In data=journal mode ext4_journalled_invalidatepage() may fail to invalidate
 * buffers that are attached to a page straddling i_size and are undergoing
 * commit. In that case we have to wait for commit to finish and try again.
 */
static void ext4_wait_for_tail_page_commit(struct inode *inode)
{
	struct page *page;
	unsigned offset;
	journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
	tid_t commit_tid = 0;
	int ret;

	offset = inode->i_size & (PAGE_SIZE - 1);
	/*
	 * If the page is fully truncated, we don't need to wait for any commit
	 * (and we even should not as __ext4_journalled_invalidatepage() may
	 * strip all buffers from the page but keep the page dirty which can then
	 * confuse e.g. concurrent ext4_writepage() seeing dirty page without
	 * buffers). Also we don't need to wait for any commit if all buffers in
	 * the page remain valid. This is most beneficial for the common case of
	 * blocksize == PAGESIZE.
	 */
	if (!offset || offset > (PAGE_SIZE - i_blocksize(inode)))
		return;
	while (1) {
		page = find_lock_page(inode->i_mapping,
				      inode->i_size >> PAGE_SHIFT);
		if (!page)
			return;
		ret = __ext4_journalled_invalidatepage(page, offset,
						PAGE_SIZE - offset);
		unlock_page(page);
		put_page(page);
		if (ret != -EBUSY)
			return;
		commit_tid = 0;
		read_lock(&journal->j_state_lock);
		if (journal->j_committing_transaction)
			commit_tid = journal->j_committing_transaction->t_tid;
		read_unlock(&journal->j_state_lock);
		if (commit_tid)
			jbd2_log_wait_commit(journal, commit_tid);
	}
}

/*
 * ext4_setattr()
 *
 * Called from notify_change.
 *
 * We want to trap VFS attempts to truncate the file as soon as
 * possible. In particular, we want to make sure that when the VFS
 * shrinks i_size, we put the inode on the orphan list and modify
 * i_disksize immediately, so that during the subsequent flushing of
 * dirty pages and freeing of disk blocks, we can guarantee that any
 * commit will leave the blocks being flushed in an unused state on
 * disk. (On recovery, the inode will get truncated and the blocks will
 * be freed, so we have a strong guarantee that no future commit will
 * leave these blocks visible to the user.)
 *
 * Another thing we have to assure is that if we are in ordered mode
 * and the inode is still attached to the committing transaction, we
 * must start writeout of all the dirty pages which are being truncated.
 * This way we are sure that all the data written in the previous
 * transaction are already on disk (truncate waits for pages under
 * writeback).
 *
 * Called with inode->i_mutex down.
 */
int ext4_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
		 struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	int error, rc = 0;
	int orphan = 0;
	const unsigned int ia_valid = attr->ia_valid;

	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
		return -EIO;

	if (unlikely(IS_IMMUTABLE(inode)))
		return -EPERM;

	if (unlikely(IS_APPEND(inode) &&
		     (ia_valid & (ATTR_MODE | ATTR_UID |
				  ATTR_GID | ATTR_TIMES_SET))))
		return -EPERM;

	error = setattr_prepare(mnt_userns, dentry, attr);
	if (error)
		return error;

	error = fscrypt_prepare_setattr(dentry, attr);
	if (error)
		return error;

	error = fsverity_prepare_setattr(dentry, attr);
	if (error)
		return error;

	if (is_quota_modification(inode, attr)) {
		error = dquot_initialize(inode);
		if (error)
			return error;
	}
	ext4_fc_start_update(inode);
	if ((ia_valid & ATTR_UID && !uid_eq(attr->ia_uid, inode->i_uid)) ||
	    (ia_valid & ATTR_GID && !gid_eq(attr->ia_gid, inode->i_gid))) {
		handle_t *handle;

		/* (user+group)*(old+new) structure, inode write (sb,
		 * inode block, ? - but truncate inode update has it) */
		handle = ext4_journal_start(inode, EXT4_HT_QUOTA,
			(EXT4_MAXQUOTAS_INIT_BLOCKS(inode->i_sb) +
			 EXT4_MAXQUOTAS_DEL_BLOCKS(inode->i_sb)) + 3);
		if (IS_ERR(handle)) {
			error = PTR_ERR(handle);
			goto err_out;
		}

		/* dquot_transfer() calls back ext4_get_inode_usage() which
		 * counts xattr inode references.
		 */
		down_read(&EXT4_I(inode)->xattr_sem);
		error = dquot_transfer(inode, attr);
		up_read(&EXT4_I(inode)->xattr_sem);

		if (error) {
			ext4_journal_stop(handle);
			ext4_fc_stop_update(inode);
			return error;
		}
		/* Update corresponding info in inode so that everything is in
		 * one transaction */
		if (attr->ia_valid & ATTR_UID)
			inode->i_uid = attr->ia_uid;
		if (attr->ia_valid & ATTR_GID)
			inode->i_gid = attr->ia_gid;
		error = ext4_mark_inode_dirty(handle, inode);
		ext4_journal_stop(handle);
		if (unlikely(error))
			return error;
	}

	if (attr->ia_valid & ATTR_SIZE) {
		handle_t *handle;
		loff_t oldsize = inode->i_size;
		int shrink = (attr->ia_size < inode->i_size);

		if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
			struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);

			if (attr->ia_size > sbi->s_bitmap_maxbytes) {
				ext4_fc_stop_update(inode);
				return -EFBIG;
			}
		}
		if (!S_ISREG(inode->i_mode)) {
			ext4_fc_stop_update(inode);
			return -EINVAL;
		}

		if (IS_I_VERSION(inode) && attr->ia_size != inode->i_size)
			inode_inc_iversion(inode);

		if (shrink) {
			if (ext4_should_order_data(inode)) {
				error = ext4_begin_ordered_truncate(inode,
							    attr->ia_size);
				if (error)
					goto err_out;
			}
			/*
			 * Blocks are going to be removed from the inode. Wait
			 * for dio in flight.
			 */
			inode_dio_wait(inode);
		}

		down_write(&EXT4_I(inode)->i_mmap_sem);

		rc = ext4_break_layouts(inode);
		if (rc) {
			up_write(&EXT4_I(inode)->i_mmap_sem);
			goto err_out;
		}

		if (attr->ia_size != inode->i_size) {
			handle = ext4_journal_start(inode, EXT4_HT_INODE, 3);
			if (IS_ERR(handle)) {
				error = PTR_ERR(handle);
				goto out_mmap_sem;
			}
			if (ext4_handle_valid(handle) && shrink) {
				error = ext4_orphan_add(handle, inode);
				orphan = 1;
			}
			/*
			 * Update c/mtime on truncate up, ext4_truncate() will
			 * update c/mtime in shrink case below
			 */
			if (!shrink) {
				inode->i_mtime = current_time(inode);
				inode->i_ctime = inode->i_mtime;
			}

			if (shrink)
				ext4_fc_track_range(handle, inode,
					(attr->ia_size > 0 ? attr->ia_size - 1 : 0) >>
					inode->i_sb->s_blocksize_bits,
					(oldsize > 0 ? oldsize - 1 : 0) >>
					inode->i_sb->s_blocksize_bits);
			else
				ext4_fc_track_range(
					handle, inode,
					(oldsize > 0 ? oldsize - 1 : oldsize) >>
					inode->i_sb->s_blocksize_bits,
					(attr->ia_size > 0 ? attr->ia_size - 1 : 0) >>
					inode->i_sb->s_blocksize_bits);

			down_write(&EXT4_I(inode)->i_data_sem);
			EXT4_I(inode)->i_disksize = attr->ia_size;
			rc = ext4_mark_inode_dirty(handle, inode);
			if (!error)
				error = rc;
			/*
			 * We have to update i_size under i_data_sem together
			 * with i_disksize to avoid races with writeback code
			 * running ext4_wb_update_i_disksize().
			 */
			if (!error)
				i_size_write(inode, attr->ia_size);
			up_write(&EXT4_I(inode)->i_data_sem);
			ext4_journal_stop(handle);
			if (error)
				goto out_mmap_sem;
			if (!shrink) {
				pagecache_isize_extended(inode, oldsize,
							 inode->i_size);
			} else if (ext4_should_journal_data(inode)) {
				ext4_wait_for_tail_page_commit(inode);
			}
		}

		/*
		 * Truncate pagecache after we've waited for commit
		 * in data=journal mode to make pages freeable.
		 */
		truncate_pagecache(inode, inode->i_size);
		/*
		 * Call ext4_truncate() even if i_size didn't change to
		 * truncate possible preallocated blocks.
		 */
		if (attr->ia_size <= oldsize) {
			rc = ext4_truncate(inode);
			if (rc)
				error = rc;
		}
out_mmap_sem:
		up_write(&EXT4_I(inode)->i_mmap_sem);
	}

	if (!error) {
		setattr_copy(mnt_userns, inode, attr);
		mark_inode_dirty(inode);
	}

	/*
	 * If the call to ext4_truncate failed to get a transaction handle at
	 * all, we need to clean up the in-core orphan list manually.
	 */
	if (orphan && inode->i_nlink)
		ext4_orphan_del(NULL, inode);

	if (!error && (ia_valid & ATTR_MODE))
		rc = posix_acl_chmod(mnt_userns, inode, inode->i_mode);

err_out:
	if (error)
		ext4_std_error(inode->i_sb, error);
	if (!error)
		error = rc;
	ext4_fc_stop_update(inode);
	return error;
}

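/*
 * statx()/stat() entry point: report the creation time when the inode
 * has room for i_crtime, and translate the user-visible EXT4_*_FL flags
 * into STATX_ATTR_* bits before filling in the generic attributes.
 */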
int ext4_getattr(struct user_namespace *mnt_userns, const struct path *path,
		 struct kstat *stat, u32 request_mask, unsigned int query_flags)
{
	struct inode *inode = d_inode(path->dentry);
	struct ext4_inode *raw_inode;
	struct ext4_inode_info *ei = EXT4_I(inode);
	unsigned int flags;

	if ((request_mask & STATX_BTIME) &&
	    EXT4_FITS_IN_INODE(raw_inode, ei, i_crtime)) {
		stat->result_mask |= STATX_BTIME;
		stat->btime.tv_sec = ei->i_crtime.tv_sec;
		stat->btime.tv_nsec = ei->i_crtime.tv_nsec;
	}

	flags = ei->i_flags & EXT4_FL_USER_VISIBLE;
	if (flags & EXT4_APPEND_FL)
		stat->attributes |= STATX_ATTR_APPEND;
	if (flags & EXT4_COMPR_FL)
		stat->attributes |= STATX_ATTR_COMPRESSED;
	if (flags & EXT4_ENCRYPT_FL)
		stat->attributes |= STATX_ATTR_ENCRYPTED;
	if (flags & EXT4_IMMUTABLE_FL)
		stat->attributes |= STATX_ATTR_IMMUTABLE;
	if (flags & EXT4_NODUMP_FL)
		stat->attributes |= STATX_ATTR_NODUMP;
	if (flags & EXT4_VERITY_FL)
		stat->attributes |= STATX_ATTR_VERITY;

	stat->attributes_mask |= (STATX_ATTR_APPEND |
				  STATX_ATTR_COMPRESSED |
				  STATX_ATTR_ENCRYPTED |
				  STATX_ATTR_IMMUTABLE |
				  STATX_ATTR_NODUMP |
				  STATX_ATTR_VERITY);

	generic_fillattr(mnt_userns, inode, stat);
	return 0;
}

Christian Brauner549c7292021-01-21 14:19:43 +01005576int ext4_file_getattr(struct user_namespace *mnt_userns,
5577 const struct path *path, struct kstat *stat,
David Howells99652ea2017-03-31 18:31:56 +01005578 u32 request_mask, unsigned int query_flags)
5579{
5580 struct inode *inode = d_inode(path->dentry);
5581 u64 delalloc_blocks;
5582
Christian Brauner14f3db52021-01-21 14:19:57 +01005583 ext4_getattr(mnt_userns, path, stat, request_mask, query_flags);
Mingming Cao3e3398a2008-07-11 19:27:31 -04005584
5585 /*
Andreas Dilger9206c562013-11-11 22:38:12 -05005586 * If there is inline data in the inode, the inode will normally not
5587 * have data blocks allocated (it may have an external xattr block).
5588 * Report at least one sector for such files, so tools like tar, rsync,
Theodore Ts'od67d64f2017-03-25 17:33:31 -04005589 * others don't incorrectly think the file is completely sparse.
Andreas Dilger9206c562013-11-11 22:38:12 -05005590 */
5591 if (unlikely(ext4_has_inline_data(inode)))
5592 stat->blocks += (stat->size + 511) >> 9;
5593
5594 /*
Mingming Cao3e3398a2008-07-11 19:27:31 -04005595 * We can't update i_blocks if the block allocation is delayed
5596 * otherwise in the case of system crash before the real block
5597 * allocation is done, we will have i_blocks inconsistent with
5598 * on-disk file blocks.
5599 * We always keep i_blocks updated together with real
5600 * allocation. But to not confuse with user, stat
5601 * will return the blocks that include the delayed allocation
5602 * blocks for this file.
5603 */
Tao Ma96607552012-05-31 22:54:16 -04005604 delalloc_blocks = EXT4_C2B(EXT4_SB(inode->i_sb),
Andreas Dilger9206c562013-11-11 22:38:12 -05005605 EXT4_I(inode)->i_reserved_data_blocks);
5606 stat->blocks += delalloc_blocks << (inode->i_sb->s_blocksize_bits - 9);
Mingming Cao3e3398a2008-07-11 19:27:31 -04005607 return 0;
5608}
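
/*
 * Minimal user-space sketch of the sector arithmetic above (example
 * only, assumed values). stat->blocks is counted in 512-byte sectors,
 * so a 2^bits-byte filesystem block contributes 2^(bits - 9) sectors,
 * and an inline-data file reports its byte size rounded up to whole
 * sectors.
 */
#if 0	/* example only */
#include <stdio.h>

int main(void)
{
	unsigned long long size = 1000;		/* hypothetical inline-data size */
	unsigned long long delalloc_blocks = 3;	/* hypothetical reservation */
	unsigned int blocksize_bits = 12;	/* 4 KiB blocks */

	printf("inline sectors: %llu\n", (size + 511) >> 9);	/* -> 2 */
	printf("delalloc sectors: %llu\n",
	       delalloc_blocks << (blocksize_bits - 9));	/* 3 * 8 = 24 */
	return 0;
}
#endif
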
Dave Kleikampac27a0e2006-10-11 01:20:50 -07005609
Jan Karafffb2732013-06-04 13:01:11 -04005610static int ext4_index_trans_blocks(struct inode *inode, int lblocks,
5611 int pextents)
Mingming Caoa02908f2008-08-19 22:16:07 -04005612{
Dmitry Monakhov12e9b892010-05-16 22:00:00 -04005613 if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
Jan Karafffb2732013-06-04 13:01:11 -04005614 return ext4_ind_trans_blocks(inode, lblocks);
5615 return ext4_ext_index_trans_blocks(inode, pextents);
Mingming Caoa02908f2008-08-19 22:16:07 -04005616}
Theodore Ts'oac51d832008-11-06 16:49:36 -05005617
Mingming Caoa02908f2008-08-19 22:16:07 -04005618/*
 5619 * Account for index blocks, block group bitmaps and block group
 5620 * descriptor blocks if we modify data blocks and index blocks. In the
 5621 * worst case, the index blocks spread over different block groups.
 5622 *
 5623 * If the data blocks are discontiguous, they may spread over
Anatol Pomozov4907cb72012-09-01 10:31:09 -07005624 * different block groups too. Even if they are contiguous, with flexbg
Mingming Caoa02908f2008-08-19 22:16:07 -04005625 * they could still cross a block group boundary.
 5626 *
 5627 * Also account for the superblock, inode, quota and xattr blocks.
5628 */
Tahsin Erdogandec214d2017-06-22 11:44:55 -04005629static int ext4_meta_trans_blocks(struct inode *inode, int lblocks,
Jan Karafffb2732013-06-04 13:01:11 -04005630 int pextents)
Mingming Caoa02908f2008-08-19 22:16:07 -04005631{
Theodore Ts'o8df96752009-05-01 08:50:38 -04005632 ext4_group_t groups, ngroups = ext4_get_groups_count(inode->i_sb);
5633 int gdpblocks;
Mingming Caoa02908f2008-08-19 22:16:07 -04005634 int idxblocks;
5635 int ret = 0;
5636
5637 /*
Jan Karafffb2732013-06-04 13:01:11 -04005638 * How many index blocks do we need to touch to map @lblocks logical blocks
5639 * to @pextents physical extents?
Mingming Caoa02908f2008-08-19 22:16:07 -04005640 */
Jan Karafffb2732013-06-04 13:01:11 -04005641 idxblocks = ext4_index_trans_blocks(inode, lblocks, pextents);
Mingming Caoa02908f2008-08-19 22:16:07 -04005642
5643 ret = idxblocks;
5644
5645 /*
 5646 * Now let's see how many group bitmaps and group descriptors we need
 5647 * to account for.
5648 */
Jan Karafffb2732013-06-04 13:01:11 -04005649 groups = idxblocks + pextents;
Mingming Caoa02908f2008-08-19 22:16:07 -04005650 gdpblocks = groups;
Theodore Ts'o8df96752009-05-01 08:50:38 -04005651 if (groups > ngroups)
5652 groups = ngroups;
Mingming Caoa02908f2008-08-19 22:16:07 -04005653 if (groups > EXT4_SB(inode->i_sb)->s_gdb_count)
5654 gdpblocks = EXT4_SB(inode->i_sb)->s_gdb_count;
5655
5656 /* bitmaps and block group descriptor blocks */
5657 ret += groups + gdpblocks;
5658
5659 /* Blocks for super block, inode, quota and xattr blocks */
5660 ret += EXT4_META_TRANS_BLOCKS(inode->i_sb);
Dave Kleikampac27a0e2006-10-11 01:20:50 -07005661
5662 return ret;
5663}
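
/*
 * Hedged user-space walk-through of the credit formula above; ngroups,
 * gdb_count and the meta-block constant are made-up stand-ins, not real
 * ext4 values.
 */
#if 0	/* example only */
#include <stdio.h>

int main(void)
{
	int idxblocks = 2;			/* ext4_index_trans_blocks() result */
	int pextents = 1;			/* physical extents touched */
	int ngroups = 16, gdb_count = 1;	/* assumed filesystem geometry */
	int meta = 24;				/* stand-in for EXT4_META_TRANS_BLOCKS */
	int groups = idxblocks + pextents;	/* worst case: one group each */
	int gdpblocks = groups;

	if (groups > ngroups)
		groups = ngroups;
	if (gdpblocks > gdb_count)
		gdpblocks = gdb_count;

	/* index blocks + bitmaps + descriptors + sb/inode/quota/xattr */
	printf("credits: %d\n", idxblocks + groups + gdpblocks + meta);	/* 30 */
	return 0;
}
#endif
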
5664
5665/*
Lucas De Marchi25985ed2011-03-30 22:57:33 -03005666 * Calculate the total number of credits to reserve to fit
Mingming Caof3bd1f32008-08-19 22:16:03 -04005667 * the modification of a single page into a single transaction,
 5668 * which may include multiple chunks of block allocations.
Mingming Caoa02908f2008-08-19 22:16:07 -04005669 *
Mingming Cao525f4ed2008-08-19 22:15:58 -04005670 * This could be called via ext4_write_begin().
Mingming Caoa02908f2008-08-19 22:16:07 -04005671 *
Mingming Cao525f4ed2008-08-19 22:15:58 -04005672 * We need to consider the worst case, when we allocate
Mingming Caoa02908f2008-08-19 22:16:07 -04005673 * one new block per extent.
Mingming Caoa02908f2008-08-19 22:16:07 -04005674 */
5675int ext4_writepage_trans_blocks(struct inode *inode)
5676{
5677 int bpp = ext4_journal_blocks_per_page(inode);
5678 int ret;
5679
Jan Karafffb2732013-06-04 13:01:11 -04005680 ret = ext4_meta_trans_blocks(inode, bpp, bpp);
Mingming Caoa02908f2008-08-19 22:16:07 -04005681
5682 /* Account for data blocks for journalled mode */
5683 if (ext4_should_journal_data(inode))
5684 ret += bpp;
5685 return ret;
5686}
Mingming Caof3bd1f32008-08-19 22:16:03 -04005687
5688/*
5689 * Calculate the journal credits for a chunk of data modification.
5690 *
 5691 * This is called from DIO, fallocate, or whoever calls
Eric Sandeen79e83032010-07-27 11:56:07 -04005692 * ext4_map_blocks() to map/allocate a chunk of contiguous disk blocks.
Mingming Caof3bd1f32008-08-19 22:16:03 -04005693 *
 5694 * Journal buffers for data blocks are not included here, as DIO
 5695 * and fallocate do not need to journal data buffers.
5696 */
5697int ext4_chunk_trans_blocks(struct inode *inode, int nrblocks)
5698{
5699 return ext4_meta_trans_blocks(inode, nrblocks, 1);
5700}
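
/*
 * Sketch of a typical use (illustrative only, not a real call site in
 * this file): size a handle for allocating one contiguous chunk of
 * nrblocks blocks, e.g.
 *
 *	credits = ext4_chunk_trans_blocks(inode, nrblocks);
 *	handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS, credits);
 */
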
5701
Mingming Caoa02908f2008-08-19 22:16:07 -04005702/*
Mingming Cao617ba132006-10-11 01:20:53 -07005703 * The caller must have previously called ext4_reserve_inode_write().
Dave Kleikampac27a0e2006-10-11 01:20:50 -07005704 * Given this, we know that the caller already has write access to iloc->bh.
5705 */
Mingming Cao617ba132006-10-11 01:20:53 -07005706int ext4_mark_iloc_dirty(handle_t *handle,
Theodore Ts'ode9a55b2009-06-14 17:45:34 -04005707 struct inode *inode, struct ext4_iloc *iloc)
Dave Kleikampac27a0e2006-10-11 01:20:50 -07005708{
5709 int err = 0;
5710
Vasily Averina6758302018-11-06 16:49:50 -05005711 if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb)))) {
5712 put_bh(iloc->bh);
Theodore Ts'o0db1ff22017-02-05 01:28:48 -05005713 return -EIO;
Vasily Averina6758302018-11-06 16:49:50 -05005714 }
Harshad Shirwadkara80f7fc2020-11-05 19:58:53 -08005715 ext4_fc_track_inode(handle, inode);
Harshad Shirwadkaraa75f4d2020-10-15 13:37:57 -07005716
Theodore Ts'oc64db502012-03-02 12:23:11 -05005717 if (IS_I_VERSION(inode))
Jean Noel Cordenner25ec56b2008-01-28 23:58:27 -05005718 inode_inc_iversion(inode);
5719
Dave Kleikampac27a0e2006-10-11 01:20:50 -07005720 /* ext4_do_update_inode() consumes one bh->b_count */
5721 get_bh(iloc->bh);
5722
Mingming Caodab291a2006-10-11 01:21:01 -07005723 /* ext4_do_update_inode() does jbd2_journal_dirty_metadata */
Frank Mayhar830156c2009-09-29 10:07:47 -04005724 err = ext4_do_update_inode(handle, inode, iloc);
Dave Kleikampac27a0e2006-10-11 01:20:50 -07005725 put_bh(iloc->bh);
5726 return err;
5727}
5728
5729/*
 5730 * On success, we end up with an outstanding reference count against
5731 * iloc->bh. This _must_ be cleaned up later.
5732 */
5733
5734int
Mingming Cao617ba132006-10-11 01:20:53 -07005735ext4_reserve_inode_write(handle_t *handle, struct inode *inode,
5736 struct ext4_iloc *iloc)
Dave Kleikampac27a0e2006-10-11 01:20:50 -07005737{
Frank Mayhar03901312009-01-07 00:06:22 -05005738 int err;
5739
Theodore Ts'o0db1ff22017-02-05 01:28:48 -05005740 if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
5741 return -EIO;
5742
Frank Mayhar03901312009-01-07 00:06:22 -05005743 err = ext4_get_inode_loc(inode, iloc);
5744 if (!err) {
5745 BUFFER_TRACE(iloc->bh, "get_write_access");
5746 err = ext4_journal_get_write_access(handle, iloc->bh);
5747 if (err) {
5748 brelse(iloc->bh);
5749 iloc->bh = NULL;
Dave Kleikampac27a0e2006-10-11 01:20:50 -07005750 }
5751 }
Mingming Cao617ba132006-10-11 01:20:53 -07005752 ext4_std_error(inode->i_sb, err);
Dave Kleikampac27a0e2006-10-11 01:20:50 -07005753 return err;
5754}
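
/*
 * Typical caller pattern for the two helpers above (sketch only; a real
 * caller must already hold a handle with enough credits):
 *
 *	struct ext4_iloc iloc;
 *	int err = ext4_reserve_inode_write(handle, inode, &iloc);
 *	if (!err) {
 *		... modify fields via ext4_raw_inode(&iloc) ...
 *		err = ext4_mark_iloc_dirty(handle, inode, &iloc);
 *	}
 */
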
5755
Miao Xiec03b45b2017-08-06 01:00:49 -04005756static int __ext4_expand_extra_isize(struct inode *inode,
5757 unsigned int new_extra_isize,
5758 struct ext4_iloc *iloc,
5759 handle_t *handle, int *no_expand)
5760{
5761 struct ext4_inode *raw_inode;
5762 struct ext4_xattr_ibody_header *header;
Theodore Ts'o4ea99932019-11-07 21:43:41 -05005763 unsigned int inode_size = EXT4_INODE_SIZE(inode->i_sb);
5764 struct ext4_inode_info *ei = EXT4_I(inode);
Miao Xiec03b45b2017-08-06 01:00:49 -04005765 int error;
5766
Theodore Ts'o4ea99932019-11-07 21:43:41 -05005767 /* this was checked at iget time, but double check for good measure */
5768 if ((EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize > inode_size) ||
5769 (ei->i_extra_isize & 3)) {
5770 EXT4_ERROR_INODE(inode, "bad extra_isize %u (inode size %u)",
5771 ei->i_extra_isize,
5772 EXT4_INODE_SIZE(inode->i_sb));
5773 return -EFSCORRUPTED;
5774 }
5775 if ((new_extra_isize < ei->i_extra_isize) ||
5776 (new_extra_isize < 4) ||
5777 (new_extra_isize > inode_size - EXT4_GOOD_OLD_INODE_SIZE))
5778 return -EINVAL; /* Should never happen */
5779
Miao Xiec03b45b2017-08-06 01:00:49 -04005780 raw_inode = ext4_raw_inode(iloc);
5781
5782 header = IHDR(inode, raw_inode);
5783
5784 /* No extended attributes present */
5785 if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR) ||
5786 header->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC)) {
5787 memset((void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE +
5788 EXT4_I(inode)->i_extra_isize, 0,
5789 new_extra_isize - EXT4_I(inode)->i_extra_isize);
5790 EXT4_I(inode)->i_extra_isize = new_extra_isize;
5791 return 0;
5792 }
5793
5794 /* try to expand with EAs present */
5795 error = ext4_expand_extra_isize_ea(inode, new_extra_isize,
5796 raw_inode, handle);
5797 if (error) {
5798 /*
5799 * Inode size expansion failed; don't try again
5800 */
5801 *no_expand = 1;
5802 }
5803
5804 return error;
5805}
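
/*
 * Standalone sketch of the extra_isize sanity checks above (example
 * only; 128 is the historic EXT4_GOOD_OLD_INODE_SIZE, the other values
 * are assumptions).
 */
#if 0	/* example only */
#include <stdbool.h>
#include <stdio.h>

static bool extra_isize_ok(unsigned int extra, unsigned int inode_size)
{
	if (128 + extra > inode_size)	/* must fit inside the on-disk inode */
		return false;
	if (extra & 3)			/* must stay 4-byte aligned */
		return false;
	return true;
}

int main(void)
{
	printf("%d\n", extra_isize_ok(32, 256));	/* 1: fits and aligned */
	printf("%d\n", extra_isize_ok(34, 256));	/* 0: misaligned */
	printf("%d\n", extra_isize_ok(160, 256));	/* 0: overflows the inode */
	return 0;
}
#endif
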
5806
Dave Kleikampac27a0e2006-10-11 01:20:50 -07005807/*
Kalpak Shah6dd4ee72007-07-18 09:19:57 -04005808 * Expand an inode by new_extra_isize bytes.
5809 * Returns 0 on success or negative error number on failure.
5810 */
Miao Xiecf0a5e82017-08-06 00:40:01 -04005811static int ext4_try_to_expand_extra_isize(struct inode *inode,
5812 unsigned int new_extra_isize,
5813 struct ext4_iloc iloc,
5814 handle_t *handle)
Kalpak Shah6dd4ee72007-07-18 09:19:57 -04005815{
Miao Xie3b10fdc2017-08-06 00:27:38 -04005816 int no_expand;
5817 int error;
Kalpak Shah6dd4ee72007-07-18 09:19:57 -04005818
Miao Xiecf0a5e82017-08-06 00:40:01 -04005819 if (ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND))
5820 return -EOVERFLOW;
5821
5822 /*
5823 * In nojournal mode, we can immediately attempt to expand
5824 * the inode. When journaled, we first need to obtain extra
5825 * buffer credits since we may write into the EA block
5826 * with this same handle. If journal_extend fails, then it will
5827 * only result in a minor loss of functionality for that inode.
5828 * If this is felt to be critical, then e2fsck should be run to
5829 * force a large enough s_min_extra_isize.
5830 */
Jan Kara6cb367c2019-11-05 17:44:14 +01005831 if (ext4_journal_extend(handle,
Jan Kara83448bd2019-11-05 17:44:29 +01005832 EXT4_DATA_TRANS_BLOCKS(inode->i_sb), 0) != 0)
Miao Xiecf0a5e82017-08-06 00:40:01 -04005833 return -ENOSPC;
Kalpak Shah6dd4ee72007-07-18 09:19:57 -04005834
Miao Xie3b10fdc2017-08-06 00:27:38 -04005835 if (ext4_write_trylock_xattr(inode, &no_expand) == 0)
Miao Xiecf0a5e82017-08-06 00:40:01 -04005836 return -EBUSY;
Miao Xie3b10fdc2017-08-06 00:27:38 -04005837
Miao Xiec03b45b2017-08-06 01:00:49 -04005838 error = __ext4_expand_extra_isize(inode, new_extra_isize, &iloc,
5839 handle, &no_expand);
Miao Xie3b10fdc2017-08-06 00:27:38 -04005840 ext4_write_unlock_xattr(inode, &no_expand);
Miao Xiecf0a5e82017-08-06 00:40:01 -04005841
Miao Xie3b10fdc2017-08-06 00:27:38 -04005842 return error;
Kalpak Shah6dd4ee72007-07-18 09:19:57 -04005843}
5844
Miao Xiec03b45b2017-08-06 01:00:49 -04005845int ext4_expand_extra_isize(struct inode *inode,
5846 unsigned int new_extra_isize,
5847 struct ext4_iloc *iloc)
5848{
5849 handle_t *handle;
5850 int no_expand;
5851 int error, rc;
5852
5853 if (ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND)) {
5854 brelse(iloc->bh);
5855 return -EOVERFLOW;
5856 }
5857
5858 handle = ext4_journal_start(inode, EXT4_HT_INODE,
5859 EXT4_DATA_TRANS_BLOCKS(inode->i_sb));
5860 if (IS_ERR(handle)) {
5861 error = PTR_ERR(handle);
5862 brelse(iloc->bh);
5863 return error;
5864 }
5865
5866 ext4_write_lock_xattr(inode, &no_expand);
5867
zhangyi (F)ddccb6d2019-02-21 11:29:10 -05005868 BUFFER_TRACE(iloc->bh, "get_write_access");
Miao Xiec03b45b2017-08-06 01:00:49 -04005869 error = ext4_journal_get_write_access(handle, iloc->bh);
5870 if (error) {
5871 brelse(iloc->bh);
Dan Carpenter7f420d642019-12-13 21:50:11 +03005872 goto out_unlock;
Miao Xiec03b45b2017-08-06 01:00:49 -04005873 }
5874
5875 error = __ext4_expand_extra_isize(inode, new_extra_isize, iloc,
5876 handle, &no_expand);
5877
5878 rc = ext4_mark_iloc_dirty(handle, inode, iloc);
5879 if (!error)
5880 error = rc;
5881
Dan Carpenter7f420d642019-12-13 21:50:11 +03005882out_unlock:
Miao Xiec03b45b2017-08-06 01:00:49 -04005883 ext4_write_unlock_xattr(inode, &no_expand);
Miao Xiec03b45b2017-08-06 01:00:49 -04005884 ext4_journal_stop(handle);
5885 return error;
5886}
5887
Kalpak Shah6dd4ee72007-07-18 09:19:57 -04005888/*
Dave Kleikampac27a0e2006-10-11 01:20:50 -07005889 * What we do here is to mark the in-core inode as clean with respect to inode
5890 * dirtiness (it may still be data-dirty).
5891 * This means that the in-core inode may be reaped by prune_icache
5892 * without having to perform any I/O. This is a very good thing,
5893 * because *any* task may call prune_icache - even ones which
5894 * have a transaction open against a different journal.
5895 *
5896 * Is this cheating? Not really. Sure, we haven't written the
5897 * inode out, but prune_icache isn't a user-visible syncing function.
5898 * Whenever the user wants stuff synced (sys_sync, sys_msync, sys_fsync)
5899 * we start and wait on commits.
Dave Kleikampac27a0e2006-10-11 01:20:50 -07005900 */
Harshad Shirwadkar4209ae12020-04-26 18:34:37 -07005901int __ext4_mark_inode_dirty(handle_t *handle, struct inode *inode,
5902 const char *func, unsigned int line)
Dave Kleikampac27a0e2006-10-11 01:20:50 -07005903{
Mingming Cao617ba132006-10-11 01:20:53 -07005904 struct ext4_iloc iloc;
Kalpak Shah6dd4ee72007-07-18 09:19:57 -04005905 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
Miao Xiecf0a5e82017-08-06 00:40:01 -04005906 int err;
Dave Kleikampac27a0e2006-10-11 01:20:50 -07005907
5908 might_sleep();
Theodore Ts'o7ff9c072010-11-08 13:51:33 -05005909 trace_ext4_mark_inode_dirty(inode, _RET_IP_);
Mingming Cao617ba132006-10-11 01:20:53 -07005910 err = ext4_reserve_inode_write(handle, inode, &iloc);
Eryu Guan5e1021f2016-03-12 21:40:32 -05005911 if (err)
Harshad Shirwadkar4209ae12020-04-26 18:34:37 -07005912 goto out;
Miao Xiecf0a5e82017-08-06 00:40:01 -04005913
5914 if (EXT4_I(inode)->i_extra_isize < sbi->s_want_extra_isize)
5915 ext4_try_to_expand_extra_isize(inode, sbi->s_want_extra_isize,
5916 iloc, handle);
5917
Harshad Shirwadkar4209ae12020-04-26 18:34:37 -07005918 err = ext4_mark_iloc_dirty(handle, inode, &iloc);
5919out:
5920 if (unlikely(err))
5921 ext4_error_inode_err(inode, func, line, 0, err,
5922 "mark_inode_dirty error");
5923 return err;
Dave Kleikampac27a0e2006-10-11 01:20:50 -07005924}
5925
5926/*
Mingming Cao617ba132006-10-11 01:20:53 -07005927 * ext4_dirty_inode() is called from __mark_inode_dirty()
Dave Kleikampac27a0e2006-10-11 01:20:50 -07005928 *
5929 * We're really interested in the case where a file is being extended.
5930 * i_size has been changed by generic_commit_write() and we thus need
5931 * to include the updated inode in the current transaction.
5932 *
Christoph Hellwig5dd40562010-03-03 09:05:00 -05005933 * Also, dquot_alloc_block() will always dirty the inode when blocks
Dave Kleikampac27a0e2006-10-11 01:20:50 -07005934 * are allocated to the file.
5935 *
5936 * If the inode is marked synchronous, we don't honour that here - doing
5937 * so would cause a commit on atime updates, which we don't bother doing.
5938 * We handle synchronous inodes at the highest possible level.
5939 */
Christoph Hellwigaa385722011-05-27 06:53:02 -04005940void ext4_dirty_inode(struct inode *inode, int flags)
Dave Kleikampac27a0e2006-10-11 01:20:50 -07005941{
Dave Kleikampac27a0e2006-10-11 01:20:50 -07005942 handle_t *handle;
5943
Theodore Ts'o9924a922013-02-08 21:59:22 -05005944 handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
Dave Kleikampac27a0e2006-10-11 01:20:50 -07005945 if (IS_ERR(handle))
Eric Biggerse2728c52021-01-12 11:02:47 -08005946 return;
Curt Wohlgemuthf3dc2722009-09-29 16:06:01 -04005947 ext4_mark_inode_dirty(handle, inode);
Mingming Cao617ba132006-10-11 01:20:53 -07005948 ext4_journal_stop(handle);
Dave Kleikampac27a0e2006-10-11 01:20:50 -07005949}
5950
Mingming Cao617ba132006-10-11 01:20:53 -07005951int ext4_change_inode_journal_flag(struct inode *inode, int val)
Dave Kleikampac27a0e2006-10-11 01:20:50 -07005952{
5953 journal_t *journal;
5954 handle_t *handle;
5955 int err;
Daeho Jeongc8585c62016-04-25 23:22:35 -04005956 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
Dave Kleikampac27a0e2006-10-11 01:20:50 -07005957
5958 /*
5959 * We have to be very careful here: changing a data block's
5960 * journaling status dynamically is dangerous. If we write a
5961 * data block to the journal, change the status and then delete
5962 * that block, we risk forgetting to revoke the old log record
5963 * from the journal and so a subsequent replay can corrupt data.
5964 * So, first we make sure that the journal is empty and that
5965 * nobody is changing anything.
5966 */
5967
Mingming Cao617ba132006-10-11 01:20:53 -07005968 journal = EXT4_JOURNAL(inode);
Frank Mayhar03901312009-01-07 00:06:22 -05005969 if (!journal)
5970 return 0;
Dave Hansend6995942007-07-18 08:33:51 -04005971 if (is_journal_aborted(journal))
Dave Kleikampac27a0e2006-10-11 01:20:50 -07005972 return -EROFS;
5973
Dmitry Monakhov17335dc2012-09-29 00:41:21 -04005974 /* Wait for all existing dio workers */
Dmitry Monakhov17335dc2012-09-29 00:41:21 -04005975 inode_dio_wait(inode);
5976
Daeho Jeong4c546592016-04-25 23:21:00 -04005977 /*
5978 * Before flushing the journal and switching inode's aops, we have
5979 * to flush all dirty data the inode has. There can be outstanding
5980 * delayed allocations, there can be unwritten extents created by
5981 * fallocate or buffered writes in dioread_nolock mode covered by
5982 * dirty data which can be converted only after flushing the dirty
5983 * data (and journalled aops don't know how to handle these cases).
5984 */
5985 if (val) {
5986 down_write(&EXT4_I(inode)->i_mmap_sem);
5987 err = filemap_write_and_wait(inode->i_mapping);
5988 if (err < 0) {
5989 up_write(&EXT4_I(inode)->i_mmap_sem);
Daeho Jeong4c546592016-04-25 23:21:00 -04005990 return err;
5991 }
5992 }
5993
Eric Biggersbbd55932020-02-19 10:30:46 -08005994 percpu_down_write(&sbi->s_writepages_rwsem);
Mingming Caodab291a2006-10-11 01:21:01 -07005995 jbd2_journal_lock_updates(journal);
Dave Kleikampac27a0e2006-10-11 01:20:50 -07005996
5997 /*
5998 * OK, there are no updates running now, and all cached data is
5999 * synced to disk. We are now in a completely consistent state
6000 * which doesn't have anything in the journal, and we know that
6001 * no filesystem updates are running, so it is safe to modify
6002 * the inode's in-core data-journaling state flag now.
6003 */
6004
6005 if (val)
Dmitry Monakhov12e9b892010-05-16 22:00:00 -04006006 ext4_set_inode_flag(inode, EXT4_INODE_JOURNAL_DATA);
Yongqiang Yang5872dda2011-12-28 13:55:51 -05006007 else {
Jan Kara4f879ca2014-10-30 10:53:17 -04006008 err = jbd2_journal_flush(journal);
6009 if (err < 0) {
6010 jbd2_journal_unlock_updates(journal);
Eric Biggersbbd55932020-02-19 10:30:46 -08006011 percpu_up_write(&sbi->s_writepages_rwsem);
Jan Kara4f879ca2014-10-30 10:53:17 -04006012 return err;
6013 }
Dmitry Monakhov12e9b892010-05-16 22:00:00 -04006014 ext4_clear_inode_flag(inode, EXT4_INODE_JOURNAL_DATA);
Yongqiang Yang5872dda2011-12-28 13:55:51 -05006015 }
Mingming Cao617ba132006-10-11 01:20:53 -07006016 ext4_set_aops(inode);
Dave Kleikampac27a0e2006-10-11 01:20:50 -07006017
Mingming Caodab291a2006-10-11 01:21:01 -07006018 jbd2_journal_unlock_updates(journal);
Eric Biggersbbd55932020-02-19 10:30:46 -08006019 percpu_up_write(&sbi->s_writepages_rwsem);
Daeho Jeongc8585c62016-04-25 23:22:35 -04006020
Daeho Jeong4c546592016-04-25 23:21:00 -04006021 if (val)
6022 up_write(&EXT4_I(inode)->i_mmap_sem);
Dave Kleikampac27a0e2006-10-11 01:20:50 -07006023
6024 /* Finally we can mark the inode as dirty. */
6025
Theodore Ts'o9924a922013-02-08 21:59:22 -05006026 handle = ext4_journal_start(inode, EXT4_HT_INODE, 1);
Dave Kleikampac27a0e2006-10-11 01:20:50 -07006027 if (IS_ERR(handle))
6028 return PTR_ERR(handle);
6029
Harshad Shirwadkaraa75f4d2020-10-15 13:37:57 -07006030 ext4_fc_mark_ineligible(inode->i_sb,
6031 EXT4_FC_REASON_JOURNAL_FLAG_CHANGE);
Mingming Cao617ba132006-10-11 01:20:53 -07006032 err = ext4_mark_inode_dirty(handle, inode);
Frank Mayhar03901312009-01-07 00:06:22 -05006033 ext4_handle_sync(handle);
Mingming Cao617ba132006-10-11 01:20:53 -07006034 ext4_journal_stop(handle);
6035 ext4_std_error(inode->i_sb, err);
Dave Kleikampac27a0e2006-10-11 01:20:50 -07006036
6037 return err;
6038}
Aneesh Kumar K.V2e9ee852008-07-11 19:27:31 -04006039
6040static int ext4_bh_unmapped(handle_t *handle, struct buffer_head *bh)
6041{
6042 return !buffer_mapped(bh);
6043}
6044
Souptick Joarder401b25a2018-10-02 22:20:50 -04006045vm_fault_t ext4_page_mkwrite(struct vm_fault *vmf)
Aneesh Kumar K.V2e9ee852008-07-11 19:27:31 -04006046{
Dave Jiang11bac802017-02-24 14:56:41 -08006047 struct vm_area_struct *vma = vmf->vma;
Nick Pigginc2ec1752009-03-31 15:23:21 -07006048 struct page *page = vmf->page;
Aneesh Kumar K.V2e9ee852008-07-11 19:27:31 -04006049 loff_t size;
6050 unsigned long len;
Souptick Joarder401b25a2018-10-02 22:20:50 -04006051 int err;
6052 vm_fault_t ret;
Aneesh Kumar K.V2e9ee852008-07-11 19:27:31 -04006053 struct file *file = vma->vm_file;
Al Viro496ad9a2013-01-23 17:07:38 -05006054 struct inode *inode = file_inode(file);
Aneesh Kumar K.V2e9ee852008-07-11 19:27:31 -04006055 struct address_space *mapping = inode->i_mapping;
Jan Kara9ea7df52011-06-24 14:29:41 -04006056 handle_t *handle;
6057 get_block_t *get_block;
6058 int retries = 0;
Aneesh Kumar K.V2e9ee852008-07-11 19:27:31 -04006059
Theodore Ts'o02b016c2019-06-09 22:04:33 -04006060 if (unlikely(IS_IMMUTABLE(inode)))
6061 return VM_FAULT_SIGBUS;
6062
Jan Kara8e8ad8a2012-06-12 16:20:38 +02006063 sb_start_pagefault(inode->i_sb);
Theodore Ts'o041bbb6d2012-09-30 23:04:56 -04006064 file_update_time(vma->vm_file);
Jan Karaea3d7202015-12-07 14:28:03 -05006065
6066 down_read(&EXT4_I(inode)->i_mmap_sem);
Eric Biggers7b4cc972017-04-30 00:10:50 -04006067
Souptick Joarder401b25a2018-10-02 22:20:50 -04006068 err = ext4_convert_inline_data(inode);
6069 if (err)
Eric Biggers7b4cc972017-04-30 00:10:50 -04006070 goto out_ret;
6071
Mauricio Faria de Oliveira64a9f142020-10-05 21:48:40 -03006072 /*
6073 * On data journalling we skip straight to the transaction handle:
 6074 * there's no delalloc; page truncation will be checked later; the
 6075 * early return with all buffers mapped (which calculates size/len) can't
6076 * be used; and there's no dioread_nolock, so only ext4_get_block.
6077 */
6078 if (ext4_should_journal_data(inode))
6079 goto retry_alloc;
6080
Jan Kara9ea7df52011-06-24 14:29:41 -04006081 /* Delalloc case is easy... */
6082 if (test_opt(inode->i_sb, DELALLOC) &&
Jan Kara9ea7df52011-06-24 14:29:41 -04006083 !ext4_nonda_switch(inode->i_sb)) {
6084 do {
Souptick Joarder401b25a2018-10-02 22:20:50 -04006085 err = block_page_mkwrite(vma, vmf,
Jan Kara9ea7df52011-06-24 14:29:41 -04006086 ext4_da_get_block_prep);
Souptick Joarder401b25a2018-10-02 22:20:50 -04006087 } while (err == -ENOSPC &&
Jan Kara9ea7df52011-06-24 14:29:41 -04006088 ext4_should_retry_alloc(inode->i_sb, &retries));
6089 goto out_ret;
Aneesh Kumar K.V2e9ee852008-07-11 19:27:31 -04006090 }
Darrick J. Wong0e499892011-05-18 13:55:20 -04006091
6092 lock_page(page);
Jan Kara9ea7df52011-06-24 14:29:41 -04006093 size = i_size_read(inode);
6094 /* Page got truncated from under us? */
6095 if (page->mapping != mapping || page_offset(page) > size) {
6096 unlock_page(page);
6097 ret = VM_FAULT_NOPAGE;
6098 goto out;
Darrick J. Wong0e499892011-05-18 13:55:20 -04006099 }
Aneesh Kumar K.V2e9ee852008-07-11 19:27:31 -04006100
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03006101 if (page->index == size >> PAGE_SHIFT)
6102 len = size & ~PAGE_MASK;
Aneesh Kumar K.V2e9ee852008-07-11 19:27:31 -04006103 else
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03006104 len = PAGE_SIZE;
Aneesh Kumar K.Va827eaf2009-09-09 22:36:03 -04006105 /*
Jan Kara9ea7df52011-06-24 14:29:41 -04006106 * Return if we have all the buffers mapped. This avoids the need to do
 6107 * journal_start/journal_stop, which can block and take a long time.
Mauricio Faria de Oliveira64a9f142020-10-05 21:48:40 -03006108 *
6109 * This cannot be done for data journalling, as we have to add the
6110 * inode to the transaction's list to writeprotect pages on commit.
Aneesh Kumar K.Va827eaf2009-09-09 22:36:03 -04006111 */
Aneesh Kumar K.V2e9ee852008-07-11 19:27:31 -04006112 if (page_has_buffers(page)) {
Tao Maf19d5872012-12-10 14:05:51 -05006113 if (!ext4_walk_page_buffers(NULL, page_buffers(page),
6114 0, len, NULL,
6115 ext4_bh_unmapped)) {
Jan Kara9ea7df52011-06-24 14:29:41 -04006116 /* Wait so that we don't change page under IO */
Darrick J. Wong1d1d1a72013-02-21 16:42:51 -08006117 wait_for_stable_page(page);
Jan Kara9ea7df52011-06-24 14:29:41 -04006118 ret = VM_FAULT_LOCKED;
6119 goto out;
Aneesh Kumar K.Va827eaf2009-09-09 22:36:03 -04006120 }
Aneesh Kumar K.V2e9ee852008-07-11 19:27:31 -04006121 }
Aneesh Kumar K.Va827eaf2009-09-09 22:36:03 -04006122 unlock_page(page);
Jan Kara9ea7df52011-06-24 14:29:41 -04006123 /* OK, we need to fill the hole... */
6124 if (ext4_should_dioread_nolock(inode))
Jan Kara705965b2016-03-08 23:08:10 -05006125 get_block = ext4_get_block_unwritten;
Jan Kara9ea7df52011-06-24 14:29:41 -04006126 else
6127 get_block = ext4_get_block;
6128retry_alloc:
Theodore Ts'o9924a922013-02-08 21:59:22 -05006129 handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE,
6130 ext4_writepage_trans_blocks(inode));
Jan Kara9ea7df52011-06-24 14:29:41 -04006131 if (IS_ERR(handle)) {
Nick Pigginc2ec1752009-03-31 15:23:21 -07006132 ret = VM_FAULT_SIGBUS;
Jan Kara9ea7df52011-06-24 14:29:41 -04006133 goto out;
6134 }
Mauricio Faria de Oliveira64a9f142020-10-05 21:48:40 -03006135 /*
6136 * Data journalling can't use block_page_mkwrite() because it
6137 * will set_buffer_dirty() before do_journal_get_write_access()
 6138 * and thus might hit warning messages for dirty metadata buffers.
6139 */
6140 if (!ext4_should_journal_data(inode)) {
6141 err = block_page_mkwrite(vma, vmf, get_block);
6142 } else {
6143 lock_page(page);
6144 size = i_size_read(inode);
6145 /* Page got truncated from under us? */
6146 if (page->mapping != mapping || page_offset(page) > size) {
Mauricio Faria de Oliveira64a9f142020-10-05 21:48:40 -03006147 ret = VM_FAULT_NOPAGE;
Mauricio Faria de Oliveiraafb585a2020-10-05 21:48:41 -03006148 goto out_error;
Jan Kara9ea7df52011-06-24 14:29:41 -04006149 }
Mauricio Faria de Oliveira64a9f142020-10-05 21:48:40 -03006150
6151 if (page->index == size >> PAGE_SHIFT)
6152 len = size & ~PAGE_MASK;
6153 else
6154 len = PAGE_SIZE;
6155
6156 err = __block_write_begin(page, 0, len, ext4_get_block);
6157 if (!err) {
Mauricio Faria de Oliveiraafb585a2020-10-05 21:48:41 -03006158 ret = VM_FAULT_SIGBUS;
Mauricio Faria de Oliveira64a9f142020-10-05 21:48:40 -03006159 if (ext4_walk_page_buffers(handle, page_buffers(page),
Mauricio Faria de Oliveiraafb585a2020-10-05 21:48:41 -03006160 0, len, NULL, do_journal_get_write_access))
6161 goto out_error;
6162 if (ext4_walk_page_buffers(handle, page_buffers(page),
6163 0, len, NULL, write_end_fn))
6164 goto out_error;
Jan Karab5b18162020-10-27 14:27:51 +01006165 if (ext4_jbd2_inode_add_write(handle, inode,
6166 page_offset(page), len))
Mauricio Faria de Oliveiraafb585a2020-10-05 21:48:41 -03006167 goto out_error;
Mauricio Faria de Oliveira64a9f142020-10-05 21:48:40 -03006168 ext4_set_inode_state(inode, EXT4_STATE_JDATA);
6169 } else {
6170 unlock_page(page);
6171 }
Jan Kara9ea7df52011-06-24 14:29:41 -04006172 }
6173 ext4_journal_stop(handle);
Souptick Joarder401b25a2018-10-02 22:20:50 -04006174 if (err == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
Jan Kara9ea7df52011-06-24 14:29:41 -04006175 goto retry_alloc;
6176out_ret:
Souptick Joarder401b25a2018-10-02 22:20:50 -04006177 ret = block_page_mkwrite_return(err);
Jan Kara9ea7df52011-06-24 14:29:41 -04006178out:
Jan Karaea3d7202015-12-07 14:28:03 -05006179 up_read(&EXT4_I(inode)->i_mmap_sem);
Jan Kara8e8ad8a2012-06-12 16:20:38 +02006180 sb_end_pagefault(inode->i_sb);
Aneesh Kumar K.V2e9ee852008-07-11 19:27:31 -04006181 return ret;
Mauricio Faria de Oliveiraafb585a2020-10-05 21:48:41 -03006182out_error:
6183 unlock_page(page);
6184 ext4_journal_stop(handle);
6185 goto out;
Aneesh Kumar K.V2e9ee852008-07-11 19:27:31 -04006186}
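
/*
 * User-space sketch (assuming 4 KiB pages) of the EOF length clamp that
 * ext4_page_mkwrite() applies twice above: only the page containing EOF
 * is written back partially.
 */
#if 0	/* example only */
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

static unsigned long mkwrite_len(unsigned long index, unsigned long long size)
{
	if (index == size >> PAGE_SHIFT)
		return size & ~PAGE_MASK;	/* partial tail page */
	return PAGE_SIZE;			/* fully covered page */
}

int main(void)
{
	printf("%lu\n", mkwrite_len(2, 10000));	/* tail: 10000 - 8192 = 1808 */
	printf("%lu\n", mkwrite_len(1, 10000));	/* interior page: 4096 */
	return 0;
}
#endif
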
Jan Karaea3d7202015-12-07 14:28:03 -05006187
Souptick Joarder401b25a2018-10-02 22:20:50 -04006188vm_fault_t ext4_filemap_fault(struct vm_fault *vmf)
Jan Karaea3d7202015-12-07 14:28:03 -05006189{
Dave Jiang11bac802017-02-24 14:56:41 -08006190 struct inode *inode = file_inode(vmf->vma->vm_file);
Souptick Joarder401b25a2018-10-02 22:20:50 -04006191 vm_fault_t ret;
Jan Karaea3d7202015-12-07 14:28:03 -05006192
6193 down_read(&EXT4_I(inode)->i_mmap_sem);
Souptick Joarder401b25a2018-10-02 22:20:50 -04006194 ret = filemap_fault(vmf);
Jan Karaea3d7202015-12-07 14:28:03 -05006195 up_read(&EXT4_I(inode)->i_mmap_sem);
6196
Souptick Joarder401b25a2018-10-02 22:20:50 -04006197 return ret;
Jan Karaea3d7202015-12-07 14:28:03 -05006198}