// SPDX-License-Identifier: LGPL-2.1
/*
 * Copyright IBM Corporation, 2007
 * Author Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
 *
 */

#include <linux/slab.h>
#include "ext4_jbd2.h"
#include "ext4_extents.h"

/*
 * Details of a range of contiguous blocks that can be
 * represented by a single extent
 */
struct migrate_struct {
	ext4_lblk_t first_block, last_block, curr_block;
	ext4_fsblk_t first_pblock, last_pblock;
};

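/*
 * Flush the range accumulated in @lb into the extent tree of @inode (the
 * temporary inode used during migration) as a single extent, making sure
 * the handle has enough journal credits first.
 */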
static int finish_range(handle_t *handle, struct inode *inode,
				struct migrate_struct *lb)
{
	int retval = 0, needed;
	struct ext4_extent newext;
	struct ext4_ext_path *path;

	if (lb->first_pblock == 0)
		return 0;

	/* Add the extent to temp inode */
	newext.ee_block = cpu_to_le32(lb->first_block);
	newext.ee_len = cpu_to_le16(lb->last_block - lb->first_block + 1);
	ext4_ext_store_pblock(&newext, lb->first_pblock);
	/* Locking only for convenience since we are operating on temp inode */
	down_write(&EXT4_I(inode)->i_data_sem);
	path = ext4_find_extent(inode, lb->first_block, NULL, 0);
	if (IS_ERR(path)) {
		retval = PTR_ERR(path);
		path = NULL;
		goto err_out;
	}

	/*
	 * Calculate the credits needed to insert this extent.
	 * Since we are doing this in a loop we may accumulate extra
	 * credits, but below we try not to accumulate too many
	 * of them by restarting the journal.
	 */
	needed = ext4_ext_calc_credits_for_single_extent(inode,
		    lb->last_block - lb->first_block + 1, path);

	retval = ext4_datasem_ensure_credits(handle, inode, needed, needed, 0);
	if (retval < 0)
		goto err_out;
	retval = ext4_ext_insert_extent(handle, inode, &path, &newext, 0);
err_out:
	up_write((&EXT4_I(inode)->i_data_sem));
	ext4_ext_drop_refs(path);
	kfree(path);
	lb->first_pblock = 0;
	return retval;
}

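/*
 * Account one logical block backed by @pblock: extend the range being
 * accumulated in @lb if it is both logically and physically contiguous,
 * otherwise flush the old range via finish_range() and start a new one.
 */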
static int update_extent_range(handle_t *handle, struct inode *inode,
				ext4_fsblk_t pblock, struct migrate_struct *lb)
{
	int retval;
	/*
	 * See if we can add on to the existing range (if it exists)
	 */
	if (lb->first_pblock &&
		(lb->last_pblock+1 == pblock) &&
		(lb->last_block+1 == lb->curr_block)) {
		lb->last_pblock = pblock;
		lb->last_block = lb->curr_block;
		lb->curr_block++;
		return 0;
	}
	/*
	 * Start a new range.
	 */
	retval = finish_range(handle, inode, lb);
	lb->first_pblock = lb->last_pblock = pblock;
	lb->first_block = lb->last_block = lb->curr_block;
	lb->curr_block++;
	return retval;
}

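/*
 * Walk a single indirect block at @pblock, feeding every mapped entry to
 * update_extent_range(); holes only advance the logical block counter.
 */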
static int update_ind_extent_range(handle_t *handle, struct inode *inode,
				   ext4_fsblk_t pblock,
				   struct migrate_struct *lb)
{
	struct buffer_head *bh;
	__le32 *i_data;
	int i, retval = 0;
	unsigned long max_entries = inode->i_sb->s_blocksize >> 2;

	bh = ext4_sb_bread(inode->i_sb, pblock, 0);
	if (IS_ERR(bh))
		return PTR_ERR(bh);

	i_data = (__le32 *)bh->b_data;
	for (i = 0; i < max_entries; i++) {
		if (i_data[i]) {
			retval = update_extent_range(handle, inode,
						le32_to_cpu(i_data[i]), lb);
			if (retval)
				break;
		} else {
			lb->curr_block++;
		}
	}
	put_bh(bh);
	return retval;
}

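/*
 * Walk a double indirect block at @pblock, handing each referenced
 * indirect block to update_ind_extent_range(); holes advance the logical
 * block counter by a whole indirect block's worth of entries.
 */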
static int update_dind_extent_range(handle_t *handle, struct inode *inode,
				    ext4_fsblk_t pblock,
				    struct migrate_struct *lb)
{
	struct buffer_head *bh;
	__le32 *i_data;
	int i, retval = 0;
	unsigned long max_entries = inode->i_sb->s_blocksize >> 2;

	bh = ext4_sb_bread(inode->i_sb, pblock, 0);
	if (IS_ERR(bh))
		return PTR_ERR(bh);

	i_data = (__le32 *)bh->b_data;
	for (i = 0; i < max_entries; i++) {
		if (i_data[i]) {
			retval = update_ind_extent_range(handle, inode,
						le32_to_cpu(i_data[i]), lb);
			if (retval)
				break;
		} else {
			/* Only update the file block number */
			lb->curr_block += max_entries;
		}
	}
	put_bh(bh);
	return retval;
}

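/*
 * Walk a triple indirect block at @pblock, handing each referenced double
 * indirect block to update_dind_extent_range(); holes advance the logical
 * block counter by max_entries * max_entries.
 */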
static int update_tind_extent_range(handle_t *handle, struct inode *inode,
				    ext4_fsblk_t pblock,
				    struct migrate_struct *lb)
{
	struct buffer_head *bh;
	__le32 *i_data;
	int i, retval = 0;
	unsigned long max_entries = inode->i_sb->s_blocksize >> 2;

	bh = ext4_sb_bread(inode->i_sb, pblock, 0);
	if (IS_ERR(bh))
		return PTR_ERR(bh);

	i_data = (__le32 *)bh->b_data;
	for (i = 0; i < max_entries; i++) {
		if (i_data[i]) {
			retval = update_dind_extent_range(handle, inode,
						le32_to_cpu(i_data[i]), lb);
			if (retval)
				break;
		} else {
			/* Only update the file block number */
			lb->curr_block += max_entries * max_entries;
		}
	}
	put_bh(bh);
	return retval;
}

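/*
 * Free the indirect blocks referenced from the double indirect block
 * @i_data and then the double indirect block itself, topping up journal
 * credits before each free.
 */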
static int free_dind_blocks(handle_t *handle,
				struct inode *inode, __le32 i_data)
{
	int i;
	__le32 *tmp_idata;
	struct buffer_head *bh;
	struct super_block *sb = inode->i_sb;
	unsigned long max_entries = inode->i_sb->s_blocksize >> 2;
	int err;

	bh = ext4_sb_bread(sb, le32_to_cpu(i_data), 0);
	if (IS_ERR(bh))
		return PTR_ERR(bh);

	tmp_idata = (__le32 *)bh->b_data;
	for (i = 0; i < max_entries; i++) {
		if (tmp_idata[i]) {
			err = ext4_journal_ensure_credits(handle,
				EXT4_RESERVE_TRANS_BLOCKS,
				ext4_free_metadata_revoke_credits(sb, 1));
			if (err < 0) {
				put_bh(bh);
				return err;
			}
			ext4_free_blocks(handle, inode, NULL,
					 le32_to_cpu(tmp_idata[i]), 1,
					 EXT4_FREE_BLOCKS_METADATA |
					 EXT4_FREE_BLOCKS_FORGET);
		}
	}
	put_bh(bh);
	err = ext4_journal_ensure_credits(handle, EXT4_RESERVE_TRANS_BLOCKS,
				ext4_free_metadata_revoke_credits(sb, 1));
	if (err < 0)
		return err;
	ext4_free_blocks(handle, inode, NULL, le32_to_cpu(i_data), 1,
			 EXT4_FREE_BLOCKS_METADATA |
			 EXT4_FREE_BLOCKS_FORGET);
	return 0;
}

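/*
 * Free everything below the triple indirect block @i_data via
 * free_dind_blocks() and then the triple indirect block itself.
 */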
static int free_tind_blocks(handle_t *handle,
				struct inode *inode, __le32 i_data)
{
	int i, retval = 0;
	__le32 *tmp_idata;
	struct buffer_head *bh;
	unsigned long max_entries = inode->i_sb->s_blocksize >> 2;

	bh = ext4_sb_bread(inode->i_sb, le32_to_cpu(i_data), 0);
	if (IS_ERR(bh))
		return PTR_ERR(bh);

	tmp_idata = (__le32 *)bh->b_data;
	for (i = 0; i < max_entries; i++) {
		if (tmp_idata[i]) {
			retval = free_dind_blocks(handle,
					inode, tmp_idata[i]);
			if (retval) {
				put_bh(bh);
				return retval;
			}
		}
	}
	put_bh(bh);
	retval = ext4_journal_ensure_credits(handle, EXT4_RESERVE_TRANS_BLOCKS,
			ext4_free_metadata_revoke_credits(inode->i_sb, 1));
	if (retval < 0)
		return retval;
	ext4_free_blocks(handle, inode, NULL, le32_to_cpu(i_data), 1,
			 EXT4_FREE_BLOCKS_METADATA |
			 EXT4_FREE_BLOCKS_FORGET);
	return 0;
}

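/*
 * Free the old indirect, double indirect and triple indirect meta-data
 * blocks recorded in @i_data once the data blocks are reachable from the
 * extent tree.
 */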
static int free_ind_block(handle_t *handle, struct inode *inode, __le32 *i_data)
{
	int retval;

	/* ei->i_data[EXT4_IND_BLOCK] */
	if (i_data[0]) {
		retval = ext4_journal_ensure_credits(handle,
			EXT4_RESERVE_TRANS_BLOCKS,
			ext4_free_metadata_revoke_credits(inode->i_sb, 1));
		if (retval < 0)
			return retval;
		ext4_free_blocks(handle, inode, NULL,
				 le32_to_cpu(i_data[0]), 1,
				 EXT4_FREE_BLOCKS_METADATA |
				 EXT4_FREE_BLOCKS_FORGET);
	}

	/* ei->i_data[EXT4_DIND_BLOCK] */
	if (i_data[1]) {
		retval = free_dind_blocks(handle, inode, i_data[1]);
		if (retval)
			return retval;
	}

	/* ei->i_data[EXT4_TIND_BLOCK] */
	if (i_data[2]) {
		retval = free_tind_blocks(handle, inode, i_data[2]);
		if (retval)
			return retval;
	}
	return 0;
}

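/*
 * Swap the extent mapping built in @tmp_inode into the original @inode:
 * copy i_data across, transfer the i_blocks accounting and free the old
 * indirect meta-data blocks. Fails with -EAGAIN if a block allocation
 * raced with the migration and cleared EXT4_STATE_EXT_MIGRATE.
 */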
static int ext4_ext_swap_inode_data(handle_t *handle, struct inode *inode,
						struct inode *tmp_inode)
{
	int retval, retval2 = 0;
	__le32 i_data[3];
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct ext4_inode_info *tmp_ei = EXT4_I(tmp_inode);

	/*
	 * One credit accounted for writing the
	 * i_data field of the original inode
	 */
	retval = ext4_journal_ensure_credits(handle, 1, 0);
	if (retval < 0)
		goto err_out;

	i_data[0] = ei->i_data[EXT4_IND_BLOCK];
	i_data[1] = ei->i_data[EXT4_DIND_BLOCK];
	i_data[2] = ei->i_data[EXT4_TIND_BLOCK];

	down_write(&EXT4_I(inode)->i_data_sem);
	/*
	 * If EXT4_STATE_EXT_MIGRATE has been cleared, a block allocation
	 * happened after we started the migrate. We need to
	 * fail the migrate.
	 */
	if (!ext4_test_inode_state(inode, EXT4_STATE_EXT_MIGRATE)) {
		retval = -EAGAIN;
		up_write(&EXT4_I(inode)->i_data_sem);
		goto err_out;
	} else
		ext4_clear_inode_state(inode, EXT4_STATE_EXT_MIGRATE);
	/*
	 * We have the extent map built in the tmp inode.
	 * Now copy the i_data across.
	 */
	ext4_set_inode_flag(inode, EXT4_INODE_EXTENTS);
	memcpy(ei->i_data, tmp_ei->i_data, sizeof(ei->i_data));

	/*
	 * Update i_blocks with the new blocks that got
	 * allocated while adding extents for extent index
	 * blocks.
	 *
	 * While converting to extents we need not
	 * update the original inode i_blocks for extent blocks
	 * via quota APIs. The quota update happened via tmp_inode already.
	 */
	spin_lock(&inode->i_lock);
	inode->i_blocks += tmp_inode->i_blocks;
	spin_unlock(&inode->i_lock);
	up_write(&EXT4_I(inode)->i_data_sem);

	/*
	 * We mark the inode dirty afterwards, because we decrement
	 * i_blocks when freeing the indirect meta-data blocks.
	 */
	retval = free_ind_block(handle, inode, i_data);
	retval2 = ext4_mark_inode_dirty(handle, inode);
	if (unlikely(retval2 && !retval))
		retval = retval2;

err_out:
	return retval;
}

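/*
 * Recursively free the extent index block referenced by @ix together with
 * any lower-level index blocks below it.
 */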
static int free_ext_idx(handle_t *handle, struct inode *inode,
					struct ext4_extent_idx *ix)
{
	int i, retval = 0;
	ext4_fsblk_t block;
	struct buffer_head *bh;
	struct ext4_extent_header *eh;

	block = ext4_idx_pblock(ix);
	bh = ext4_sb_bread(inode->i_sb, block, 0);
	if (IS_ERR(bh))
		return PTR_ERR(bh);

	eh = (struct ext4_extent_header *)bh->b_data;
	if (eh->eh_depth != 0) {
		ix = EXT_FIRST_INDEX(eh);
		for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ix++) {
			retval = free_ext_idx(handle, inode, ix);
			if (retval) {
				put_bh(bh);
				return retval;
			}
		}
	}
	put_bh(bh);
	retval = ext4_journal_ensure_credits(handle, EXT4_RESERVE_TRANS_BLOCKS,
			ext4_free_metadata_revoke_credits(inode->i_sb, 1));
	if (retval < 0)
		return retval;
	ext4_free_blocks(handle, inode, NULL, block, 1,
			 EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET);
	return 0;
}

/*
 * Free the extent meta data blocks only
 */
static int free_ext_block(handle_t *handle, struct inode *inode)
{
	int i, retval = 0;
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct ext4_extent_header *eh = (struct ext4_extent_header *)ei->i_data;
	struct ext4_extent_idx *ix;

	if (eh->eh_depth == 0)
		/*
		 * No extra blocks allocated for extent meta data
		 */
		return 0;
	ix = EXT_FIRST_INDEX(eh);
	for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ix++) {
		retval = free_ext_idx(handle, inode, ix);
		if (retval)
			return retval;
	}
	return retval;
}

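/*
 * Convert a block-mapped (indirect) inode to use extents: build an
 * equivalent extent tree in a temporary inode while walking the block map,
 * then swap the new mapping into the original inode and free the old
 * indirect meta-data blocks.
 */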
int ext4_ext_migrate(struct inode *inode)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	handle_t *handle;
	int retval = 0, i;
	__le32 *i_data;
	struct ext4_inode_info *ei;
	struct inode *tmp_inode = NULL;
	struct migrate_struct lb;
	unsigned long max_entries;
	__u32 goal;
	uid_t owner[2];

	/*
	 * If the filesystem does not support extents, or the inode
	 * already is extent-based, error out.
	 */
	if (!ext4_has_feature_extents(inode->i_sb) ||
	    (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
		return -EINVAL;

	if (S_ISLNK(inode->i_mode) && inode->i_blocks == 0)
		/*
		 * don't migrate fast symlink
		 */
		return retval;

	percpu_down_write(&sbi->s_writepages_rwsem);

	/*
	 * Worst case we can touch the allocation bitmaps, a bgd
	 * block, and a block to link in the orphan list. We do need
	 * to worry about credits for modifying the quota inode.
	 */
	handle = ext4_journal_start(inode, EXT4_HT_MIGRATE,
		4 + EXT4_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb));

	if (IS_ERR(handle)) {
		retval = PTR_ERR(handle);
		goto out_unlock;
	}
	goal = (((inode->i_ino - 1) / EXT4_INODES_PER_GROUP(inode->i_sb)) *
		EXT4_INODES_PER_GROUP(inode->i_sb)) + 1;
	owner[0] = i_uid_read(inode);
	owner[1] = i_gid_read(inode);
	tmp_inode = ext4_new_inode(handle, d_inode(inode->i_sb->s_root),
				   S_IFREG, NULL, goal, owner, 0);
	if (IS_ERR(tmp_inode)) {
		retval = PTR_ERR(tmp_inode);
		ext4_journal_stop(handle);
		goto out_unlock;
	}
	i_size_write(tmp_inode, i_size_read(inode));
	/*
	 * Set the i_nlink to zero so it will be deleted later
	 * when we drop the inode reference.
	 */
	clear_nlink(tmp_inode);

	ext4_ext_tree_init(handle, tmp_inode);
	ext4_orphan_add(handle, tmp_inode);
	ext4_journal_stop(handle);

	/*
	 * Start with one credit accounted for
	 * superblock modification.
	 *
	 * For the tmp_inode we already have committed the
	 * transaction that created the inode. Later, as and
	 * when we add extents, we extend the journal.
	 */
	/*
	 * Even though we take i_mutex we can still cause block
	 * allocation via mmap write to holes. If we have allocated
	 * new blocks we fail the migrate. New block allocation will
	 * clear the EXT4_STATE_EXT_MIGRATE flag. The flag is updated
	 * with i_data_sem held to prevent racing with block
	 * allocation.
	 */
	down_read(&EXT4_I(inode)->i_data_sem);
	ext4_set_inode_state(inode, EXT4_STATE_EXT_MIGRATE);
	up_read((&EXT4_I(inode)->i_data_sem));

	handle = ext4_journal_start(inode, EXT4_HT_MIGRATE, 1);
	if (IS_ERR(handle)) {
		/*
		 * It is impossible to update on-disk structures without
		 * a handle, so just rollback in-core changes and leave the
		 * rest of the work to orphan_list_cleanup().
		 */
		ext4_orphan_del(NULL, tmp_inode);
		retval = PTR_ERR(handle);
		goto out_tmp_inode;
	}

	ei = EXT4_I(inode);
	i_data = ei->i_data;
	memset(&lb, 0, sizeof(lb));

	/* 32 bit block address 4 bytes */
	max_entries = inode->i_sb->s_blocksize >> 2;
	for (i = 0; i < EXT4_NDIR_BLOCKS; i++) {
		if (i_data[i]) {
			retval = update_extent_range(handle, tmp_inode,
						le32_to_cpu(i_data[i]), &lb);
			if (retval)
				goto err_out;
		} else
			lb.curr_block++;
	}
	if (i_data[EXT4_IND_BLOCK]) {
		retval = update_ind_extent_range(handle, tmp_inode,
				le32_to_cpu(i_data[EXT4_IND_BLOCK]), &lb);
		if (retval)
			goto err_out;
	} else
		lb.curr_block += max_entries;
	if (i_data[EXT4_DIND_BLOCK]) {
		retval = update_dind_extent_range(handle, tmp_inode,
				le32_to_cpu(i_data[EXT4_DIND_BLOCK]), &lb);
		if (retval)
			goto err_out;
	} else
		lb.curr_block += max_entries * max_entries;
	if (i_data[EXT4_TIND_BLOCK]) {
		retval = update_tind_extent_range(handle, tmp_inode,
				le32_to_cpu(i_data[EXT4_TIND_BLOCK]), &lb);
		if (retval)
			goto err_out;
	}
	/*
	 * Build the last extent
	 */
	retval = finish_range(handle, tmp_inode, &lb);
err_out:
	if (retval)
		/*
		 * On failure, delete the extent information built in
		 * the tmp_inode.
		 */
		free_ext_block(handle, tmp_inode);
	else {
		retval = ext4_ext_swap_inode_data(handle, inode, tmp_inode);
		if (retval)
			/*
			 * If we fail to swap inode data, free the extent
			 * details of the tmp inode.
			 */
			free_ext_block(handle, tmp_inode);
	}

	/* We mark the tmp_inode dirty via ext4_ext_tree_init. */
	retval = ext4_journal_ensure_credits(handle, 1, 0);
	if (retval < 0)
		goto out_stop;
	/*
	 * Mark the tmp_inode as of size zero
	 */
	i_size_write(tmp_inode, 0);

	/*
	 * set the i_blocks count to zero
	 * so that the ext4_evict_inode() does the
	 * right job
	 *
	 * We don't need to take the i_lock because
	 * the inode is not visible to user space.
	 */
	tmp_inode->i_blocks = 0;

	/* Reset the extent details */
	ext4_ext_tree_init(handle, tmp_inode);
out_stop:
	ext4_journal_stop(handle);
out_tmp_inode:
	unlock_new_inode(tmp_inode);
	iput(tmp_inode);
out_unlock:
	percpu_up_write(&sbi->s_writepages_rwsem);
	return retval;
}

/*
 * Migrate a simple extent-based inode to use the i_blocks[] array
 */
int ext4_ind_migrate(struct inode *inode)
{
	struct ext4_extent_header	*eh;
	struct ext4_sb_info		*sbi = EXT4_SB(inode->i_sb);
	struct ext4_super_block		*es = sbi->s_es;
	struct ext4_inode_info		*ei = EXT4_I(inode);
	struct ext4_extent		*ex;
	unsigned int			i, len;
	ext4_lblk_t			start, end;
	ext4_fsblk_t			blk;
	handle_t			*handle;
	int				ret, ret2 = 0;

	if (!ext4_has_feature_extents(inode->i_sb) ||
	    (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
		return -EINVAL;

	if (ext4_has_feature_bigalloc(inode->i_sb))
		return -EOPNOTSUPP;

	/*
	 * In order to get correct extent info, force all delayed allocation
	 * blocks to be allocated; otherwise delayed allocation blocks may not
	 * be reflected in the extent tree and would bypass the checks on the
	 * extent header below.
	 */
	if (test_opt(inode->i_sb, DELALLOC))
		ext4_alloc_da_blocks(inode);

	percpu_down_write(&sbi->s_writepages_rwsem);

	handle = ext4_journal_start(inode, EXT4_HT_MIGRATE, 1);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		goto out_unlock;
	}

	down_write(&EXT4_I(inode)->i_data_sem);
	ret = ext4_ext_check_inode(inode);
	if (ret)
		goto errout;

	eh = ext_inode_hdr(inode);
	ex = EXT_FIRST_EXTENT(eh);
	if (ext4_blocks_count(es) > EXT4_MAX_BLOCK_FILE_PHYS ||
	    eh->eh_depth != 0 || le16_to_cpu(eh->eh_entries) > 1) {
		ret = -EOPNOTSUPP;
		goto errout;
	}
	if (eh->eh_entries == 0)
		blk = len = start = end = 0;
	else {
		len = le16_to_cpu(ex->ee_len);
		blk = ext4_ext_pblock(ex);
		start = le32_to_cpu(ex->ee_block);
		end = start + len - 1;
		if (end >= EXT4_NDIR_BLOCKS) {
			ret = -EOPNOTSUPP;
			goto errout;
		}
	}

	ext4_clear_inode_flag(inode, EXT4_INODE_EXTENTS);
	memset(ei->i_data, 0, sizeof(ei->i_data));
	for (i = start; i <= end; i++)
		ei->i_data[i] = cpu_to_le32(blk++);
	ret2 = ext4_mark_inode_dirty(handle, inode);
	if (unlikely(ret2 && !ret))
		ret = ret2;
errout:
	ext4_journal_stop(handle);
	up_write(&EXT4_I(inode)->i_data_sem);
out_unlock:
	percpu_up_write(&sbi->s_writepages_rwsem);
	return ret;
}