blob: 64a579734f934a9ffdcd4a36076cc1ef2bbe1dbb [file] [log] [blame]
Theodore Ts'of5166762017-12-17 22:00:59 -05001// SPDX-License-Identifier: LGPL-2.1
Akira Fujita748de672009-06-17 19:24:03 -04002/*
3 * Copyright (c) 2008,2009 NEC Software Tohoku, Ltd.
4 * Written by Takashi Sato <t-sato@yk.jp.nec.com>
5 * Akira Fujita <a-fujita@rs.jp.nec.com>
Akira Fujita748de672009-06-17 19:24:03 -04006 */
7
8#include <linux/fs.h>
9#include <linux/quotaops.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090010#include <linux/slab.h>
Akira Fujita748de672009-06-17 19:24:03 -040011#include "ext4_jbd2.h"
Akira Fujita748de672009-06-17 19:24:03 -040012#include "ext4.h"
Theodore Ts'o4a092d72012-11-28 13:03:30 -050013#include "ext4_extents.h"
Akira Fujita748de672009-06-17 19:24:03 -040014
Akira Fujitae8505972009-09-16 13:46:38 -040015/**
Theodore Ts'oc60990b2019-06-19 16:30:03 -040016 * get_ext_path() - Find an extent path for designated logical block number.
17 * @inode: inode to be searched
Akira Fujitae8505972009-09-16 13:46:38 -040018 * @lblock: logical block number to find an extent path
Theodore Ts'oc60990b2019-06-19 16:30:03 -040019 * @ppath: pointer to an extent path pointer (for output)
Akira Fujitae8505972009-09-16 13:46:38 -040020 *
Theodore Ts'oed8a1a72014-09-01 14:43:09 -040021 * ext4_find_extent wrapper. Return 0 on success, or a negative error value
Akira Fujitae8505972009-09-16 13:46:38 -040022 * on failure.
23 */
24static inline int
25get_ext_path(struct inode *inode, ext4_lblk_t lblock,
Theodore Ts'o3bdf14b2014-09-01 14:42:09 -040026 struct ext4_ext_path **ppath)
Akira Fujitae8505972009-09-16 13:46:38 -040027{
Dmitry Monakhov0e401102013-03-18 11:40:19 -040028 struct ext4_ext_path *path;
Akira Fujitae8505972009-09-16 13:46:38 -040029
Theodore Ts'oed8a1a72014-09-01 14:43:09 -040030 path = ext4_find_extent(inode, lblock, ppath, EXT4_EX_NOCACHE);
Dmitry Monakhov0e401102013-03-18 11:40:19 -040031 if (IS_ERR(path))
Theodore Ts'o3bdf14b2014-09-01 14:42:09 -040032 return PTR_ERR(path);
33 if (path[ext_depth(inode)].p_ext == NULL) {
34 ext4_ext_drop_refs(path);
35 kfree(path);
36 *ppath = NULL;
37 return -ENODATA;
38 }
39 *ppath = path;
40 return 0;
Akira Fujitae8505972009-09-16 13:46:38 -040041}
Akira Fujita748de672009-06-17 19:24:03 -040042
/**
 * ext4_double_down_write_data_sem() - write lock two inodes's i_data_sem
 * @first: inode to be locked
 * @second: inode to be locked
 *
 * Acquire write lock of i_data_sem of the two inodes
 */
void
ext4_double_down_write_data_sem(struct inode *first, struct inode *second)
{
	/*
	 * Always lock in ascending inode (address) order so that two
	 * concurrent callers with the inodes swapped cannot deadlock
	 * (classic ABBA avoidance).  The second acquisition uses
	 * down_write_nested() with the I_DATA_SEM_OTHER subclass purely
	 * to keep lockdep from flagging the nested i_data_sem take.
	 */
	if (first < second) {
		down_write(&EXT4_I(first)->i_data_sem);
		down_write_nested(&EXT4_I(second)->i_data_sem, I_DATA_SEM_OTHER);
	} else {
		down_write(&EXT4_I(second)->i_data_sem);
		down_write_nested(&EXT4_I(first)->i_data_sem, I_DATA_SEM_OTHER);

	}
}
62
63/**
Dr. Tilmann Bubeck393d1d12013-04-08 12:54:05 -040064 * ext4_double_up_write_data_sem - Release two inodes' write lock of i_data_sem
Akira Fujita748de672009-06-17 19:24:03 -040065 *
66 * @orig_inode: original inode structure to be released its lock first
67 * @donor_inode: donor inode structure to be released its lock second
Akira Fujitafc04cb42009-11-23 07:24:43 -050068 * Release write lock of i_data_sem of two inodes (orig and donor).
Akira Fujita748de672009-06-17 19:24:03 -040069 */
Dr. Tilmann Bubeck393d1d12013-04-08 12:54:05 -040070void
71ext4_double_up_write_data_sem(struct inode *orig_inode,
72 struct inode *donor_inode)
Akira Fujita748de672009-06-17 19:24:03 -040073{
Akira Fujita748de672009-06-17 19:24:03 -040074 up_write(&EXT4_I(orig_inode)->i_data_sem);
75 up_write(&EXT4_I(donor_inode)->i_data_sem);
76}
77
/**
 * mext_check_coverage - Check that all extents in range has the same type
 *
 * @inode:	inode in question
 * @from:	block offset of inode
 * @count:	block count to be checked
 * @unwritten:	extents expected to be unwritten
 * @err:	pointer to save error value
 *
 * Return 1 if all extents in range has expected type, and zero otherwise.
 * On lookup failure *err carries the negative error and 0 is returned.
 */
static int
mext_check_coverage(struct inode *inode, ext4_lblk_t from, ext4_lblk_t count,
		    int unwritten, int *err)
{
	struct ext4_ext_path *path = NULL;
	struct ext4_extent *ext;
	int ret = 0;
	ext4_lblk_t last = from + count;
	/* Walk extent by extent until the whole [from, last) range is seen. */
	while (from < last) {
		*err = get_ext_path(inode, from, &path);
		if (*err)
			goto out;
		ext = path[ext_depth(inode)].p_ext;
		/* Mixed type found: bail out with ret == 0. */
		if (unwritten != ext4_ext_is_unwritten(ext))
			goto out;
		/* Advance past this extent and re-look-up the next one. */
		from += ext4_ext_get_actual_len(ext);
		ext4_ext_drop_refs(path);
	}
	ret = 1;
out:
	/* get_ext_path() may reuse *ppath, so drop refs and free once here. */
	ext4_ext_drop_refs(path);
	kfree(path);
	return ret;
}
113
/**
 * mext_page_double_lock - Grab and lock pages on both @inode1 and @inode2
 *
 * @inode1:	the inode structure
 * @inode2:	the inode structure
 * @index1:	page index
 * @index2:	page index
 * @page:	result page vector
 *
 * Grab two locked pages for inode's by inode order.  On success page[0]
 * belongs to @inode1 and page[1] to @inode2, both locked with writeback
 * completed.  Returns 0 or -ENOMEM.
 */
static int
mext_page_double_lock(struct inode *inode1, struct inode *inode2,
		      pgoff_t index1, pgoff_t index2, struct page *page[2])
{
	struct address_space *mapping[2];
	unsigned fl = AOP_FLAG_NOFS;

	BUG_ON(!inode1 || !inode2);
	/*
	 * Lock pages in ascending inode-address order to avoid ABBA
	 * deadlock with a concurrent caller that passed the inodes
	 * swapped.  If we reorder the mappings, the indices must be
	 * swapped along with them.
	 */
	if (inode1 < inode2) {
		mapping[0] = inode1->i_mapping;
		mapping[1] = inode2->i_mapping;
	} else {
		swap(index1, index2);
		mapping[0] = inode2->i_mapping;
		mapping[1] = inode1->i_mapping;
	}

	page[0] = grab_cache_page_write_begin(mapping[0], index1, fl);
	if (!page[0])
		return -ENOMEM;

	page[1] = grab_cache_page_write_begin(mapping[1], index2, fl);
	if (!page[1]) {
		/* Undo the first grab before failing. */
		unlock_page(page[0]);
		put_page(page[0]);
		return -ENOMEM;
	}
	/*
	 * grab_cache_page_write_begin() may not wait on page's writeback if
	 * BDI not demand that. But it is reasonable to be very conservative
	 * here and explicitly wait on page's writeback
	 */
	wait_on_page_writeback(page[0]);
	wait_on_page_writeback(page[1]);
	/* Restore caller-visible order: page[0] = inode1's, page[1] = inode2's. */
	if (inode1 > inode2)
		swap(page[0], page[1]);

	return 0;
}
164
/*
 * Force page buffers uptodate w/o dropping page's lock.
 *
 * Make every buffer covering the byte range [from, to) of @page uptodate,
 * reading from disk where necessary, while the page stays locked.  Buffers
 * outside the range are left alone (the page is only marked uptodate when
 * no such partial buffer remains non-uptodate).  Returns 0 or a negative
 * error from block mapping / read.
 */
static int
mext_page_mkuptodate(struct page *page, unsigned from, unsigned to)
{
	struct inode *inode = page->mapping->host;
	sector_t block;
	struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
	unsigned int blocksize, block_start, block_end;
	int i, err, nr = 0, partial = 0;
	BUG_ON(!PageLocked(page));
	BUG_ON(PageWriteback(page));

	if (PageUptodate(page))
		return 0;

	blocksize = i_blocksize(inode);
	if (!page_has_buffers(page))
		create_empty_buffers(page, blocksize, 0);

	head = page_buffers(page);
	/* First logical block backing this page. */
	block = (sector_t)page->index << (PAGE_SHIFT - inode->i_blkbits);
	/*
	 * Walk the circular buffer list once; the loop condition terminates
	 * when bh wraps back to head after the first iteration.
	 */
	for (bh = head, block_start = 0; bh != head || !block_start;
	     block++, block_start = block_end, bh = bh->b_this_page) {
		block_end = block_start + blocksize;
		if (block_end <= from || block_start >= to) {
			/* Outside the range: only note if it's not uptodate. */
			if (!buffer_uptodate(bh))
				partial = 1;
			continue;
		}
		if (buffer_uptodate(bh))
			continue;
		if (!buffer_mapped(bh)) {
			err = ext4_get_block(inode, block, bh, 0);
			if (err) {
				SetPageError(page);
				return err;
			}
			if (!buffer_mapped(bh)) {
				/* Hole: zero-fill instead of reading. */
				zero_user(page, block_start, blocksize);
				set_buffer_uptodate(bh);
				continue;
			}
		}
		/* Mapped but stale: queue for reading below. */
		BUG_ON(nr >= MAX_BUF_PER_PAGE);
		arr[nr++] = bh;
	}
	/* No io required */
	if (!nr)
		goto out;

	for (i = 0; i < nr; i++) {
		bh = arr[i];
		if (!bh_uptodate_or_lock(bh)) {
			err = ext4_read_bh(bh, 0, NULL);
			if (err)
				return err;
		}
	}
out:
	/* Only safe to mark the whole page when no partial buffer is stale. */
	if (!partial)
		SetPageUptodate(page);
	return 0;
}
228
/**
 * move_extent_per_page - Move extent data per page
 *
 * @o_filp:			file structure of original file
 * @donor_inode:		donor inode
 * @orig_page_offset:		page index on original file
 * @donor_page_offset:		page index on donor file
 * @data_offset_in_page:	block index where data swapping starts
 * @block_len_in_page:		the number of blocks to be swapped
 * @unwritten:			orig extent is unwritten or not
 * @err:			pointer to save return value
 *
 * Save the data in original inode blocks and replace original inode extents
 * with donor inode extents by calling ext4_swap_extents().
 * Finally, write out the saved data in new original inode blocks. Return
 * replaced block count.
 */
static int
move_extent_per_page(struct file *o_filp, struct inode *donor_inode,
		     pgoff_t orig_page_offset, pgoff_t donor_page_offset,
		     int data_offset_in_page,
		     int block_len_in_page, int unwritten, int *err)
{
	struct inode *orig_inode = file_inode(o_filp);
	struct page *pagep[2] = {NULL, NULL};
	handle_t *handle;
	ext4_lblk_t orig_blk_offset, donor_blk_offset;
	unsigned long blocksize = orig_inode->i_sb->s_blocksize;
	unsigned int tmp_data_size, data_size, replaced_size;
	int i, err2, jblocks, retries = 0;
	int replaced_count = 0;
	int from = data_offset_in_page << orig_inode->i_blkbits;
	int blocks_per_page = PAGE_SIZE >> orig_inode->i_blkbits;
	struct super_block *sb = orig_inode->i_sb;
	struct buffer_head *bh = NULL;

	/*
	 * It needs twice the amount of ordinary journal buffers because
	 * inode and donor_inode may change each different metadata blocks.
	 */
again:
	*err = 0;
	jblocks = ext4_writepage_trans_blocks(orig_inode) * 2;
	handle = ext4_journal_start(orig_inode, EXT4_HT_MOVE_EXTENTS, jblocks);
	if (IS_ERR(handle)) {
		*err = PTR_ERR(handle);
		return 0;
	}

	orig_blk_offset = orig_page_offset * blocks_per_page +
		data_offset_in_page;

	donor_blk_offset = donor_page_offset * blocks_per_page +
		data_offset_in_page;

	/* Calculate data_size */
	if ((orig_blk_offset + block_len_in_page - 1) ==
	    ((orig_inode->i_size - 1) >> orig_inode->i_blkbits)) {
		/* Replace the last block */
		tmp_data_size = orig_inode->i_size & (blocksize - 1);
		/*
		 * If data_size equal zero, it shows data_size is multiples of
		 * blocksize. So we set appropriate value.
		 */
		if (tmp_data_size == 0)
			tmp_data_size = blocksize;

		data_size = tmp_data_size +
			((block_len_in_page - 1) << orig_inode->i_blkbits);
	} else
		data_size = block_len_in_page << orig_inode->i_blkbits;

	replaced_size = data_size;

	*err = mext_page_double_lock(orig_inode, donor_inode, orig_page_offset,
				     donor_page_offset, pagep);
	if (unlikely(*err < 0))
		goto stop_journal;
	/*
	 * If orig extent was unwritten it can become initialized
	 * at any time after i_data_sem was dropped, in order to
	 * serialize with delalloc we have recheck extent while we
	 * hold page's lock, if it is still the case data copy is not
	 * necessary, just swap data blocks between orig and donor.
	 */
	if (unwritten) {
		ext4_double_down_write_data_sem(orig_inode, donor_inode);
		/* If any of extents in range became initialized we have to
		 * fallback to data copying */
		unwritten = mext_check_coverage(orig_inode, orig_blk_offset,
						block_len_in_page, 1, err);
		if (*err)
			goto drop_data_sem;

		unwritten &= mext_check_coverage(donor_inode, donor_blk_offset,
						 block_len_in_page, 1, err);
		if (*err)
			goto drop_data_sem;

		if (!unwritten) {
			ext4_double_up_write_data_sem(orig_inode, donor_inode);
			goto data_copy;
		}
		/*
		 * Both ranges are still fully unwritten: no data copy is
		 * needed, but stale private (buffer) state must be released
		 * before the block mapping changes underneath the pages.
		 */
		if ((page_has_private(pagep[0]) &&
		     !try_to_release_page(pagep[0], 0)) ||
		    (page_has_private(pagep[1]) &&
		     !try_to_release_page(pagep[1], 0))) {
			*err = -EBUSY;
			goto drop_data_sem;
		}
		replaced_count = ext4_swap_extents(handle, orig_inode,
						   donor_inode, orig_blk_offset,
						   donor_blk_offset,
						   block_len_in_page, 1, err);
	drop_data_sem:
		ext4_double_up_write_data_sem(orig_inode, donor_inode);
		goto unlock_pages;
	}
data_copy:
	*err = mext_page_mkuptodate(pagep[0], from, from + replaced_size);
	if (*err)
		goto unlock_pages;

	/* At this point all buffers in range are uptodate, old mapping layout
	 * is no longer required, try to drop it now. */
	if ((page_has_private(pagep[0]) && !try_to_release_page(pagep[0], 0)) ||
	    (page_has_private(pagep[1]) && !try_to_release_page(pagep[1], 0))) {
		*err = -EBUSY;
		goto unlock_pages;
	}
	ext4_double_down_write_data_sem(orig_inode, donor_inode);
	replaced_count = ext4_swap_extents(handle, orig_inode, donor_inode,
					   orig_blk_offset, donor_blk_offset,
					   block_len_in_page, 1, err);
	ext4_double_up_write_data_sem(orig_inode, donor_inode);
	if (*err) {
		/*
		 * Partial swap: shrink the range to what actually got
		 * swapped so the copy-back below still writes valid data.
		 */
		if (replaced_count) {
			block_len_in_page = replaced_count;
			replaced_size =
				block_len_in_page << orig_inode->i_blkbits;
		} else
			goto unlock_pages;
	}
	/* Perform all necessary steps similar write_begin()/write_end()
	 * but keeping in mind that i_size will not change */
	if (!page_has_buffers(pagep[0]))
		create_empty_buffers(pagep[0], 1 << orig_inode->i_blkbits, 0);
	bh = page_buffers(pagep[0]);
	/* Skip buffers before the start of the swapped range. */
	for (i = 0; i < data_offset_in_page; i++)
		bh = bh->b_this_page;
	/* Re-map each buffer to the (newly swapped-in) blocks. */
	for (i = 0; i < block_len_in_page; i++) {
		*err = ext4_get_block(orig_inode, orig_blk_offset + i, bh, 0);
		if (*err < 0)
			break;
		bh = bh->b_this_page;
	}
	if (!*err)
		*err = block_commit_write(pagep[0], from, from + replaced_size);

	if (unlikely(*err < 0))
		goto repair_branches;

	/* Even in case of data=writeback it is reasonable to pin
	 * inode to transaction, to prevent unexpected data loss */
	*err = ext4_jbd2_inode_add_write(handle, orig_inode,
			(loff_t)orig_page_offset << PAGE_SHIFT, replaced_size);

unlock_pages:
	unlock_page(pagep[0]);
	put_page(pagep[0]);
	unlock_page(pagep[1]);
	put_page(pagep[1]);
stop_journal:
	ext4_journal_stop(handle);
	/* Retry the whole page move after a successful allocation retry. */
	if (*err == -ENOSPC &&
	    ext4_should_retry_alloc(sb, &retries))
		goto again;
	/* Buffer was busy because probably is pinned to journal transaction,
	 * force transaction commit may help to free it. */
	if (*err == -EBUSY && retries++ < 4 && EXT4_SB(sb)->s_journal &&
	    jbd2_journal_force_commit_nested(EXT4_SB(sb)->s_journal))
		goto again;
	return replaced_count;

repair_branches:
	/*
	 * This should never ever happen!
	 * Extents are swapped already, but we are not able to copy data.
	 * Try to swap extents to it's original places
	 */
	ext4_double_down_write_data_sem(orig_inode, donor_inode);
	replaced_count = ext4_swap_extents(handle, donor_inode, orig_inode,
					   orig_blk_offset, donor_blk_offset,
					   block_len_in_page, 0, &err2);
	ext4_double_up_write_data_sem(orig_inode, donor_inode);
	if (replaced_count != block_len_in_page) {
		/* Roll-back failed too: data in this range is lost. */
		ext4_error_inode_block(orig_inode, (sector_t)(orig_blk_offset),
				       EIO, "Unable to copy data block,"
				       " data will be lost.");
		*err = -EIO;
	}
	replaced_count = 0;
	goto unlock_pages;
}
433
/**
 * mext_check_arguments - Check whether move extent can be done
 *
 * @orig_inode:		original inode
 * @donor_inode:	donor inode
 * @orig_start:		logical start offset in block for orig
 * @donor_start:	logical start offset in block for donor
 * @len:		the number of blocks to be moved (may be clamped to EOF)
 *
 * Check the arguments of ext4_move_extents() whether the files can be
 * exchanged with each other.
 * Return 0 on success, or a negative error value on failure.
 */
static int
mext_check_arguments(struct inode *orig_inode,
		     struct inode *donor_inode, __u64 orig_start,
		     __u64 donor_start, __u64 *len)
{
	__u64 orig_eof, donor_eof;
	unsigned int blkbits = orig_inode->i_blkbits;
	unsigned int blocksize = 1 << blkbits;

	/* EOF in blocks, rounded up to a whole block. */
	orig_eof = (i_size_read(orig_inode) + blocksize - 1) >> blkbits;
	donor_eof = (i_size_read(donor_inode) + blocksize - 1) >> blkbits;


	/* Refuse setuid/setgid donors: swapping blocks into them could
	 * plant attacker-chosen data in a privileged file. */
	if (donor_inode->i_mode & (S_ISUID|S_ISGID)) {
		ext4_debug("ext4 move extent: suid or sgid is set"
			   " to donor file [ino:orig %lu, donor %lu]\n",
			   orig_inode->i_ino, donor_inode->i_ino);
		return -EINVAL;
	}

	if (IS_IMMUTABLE(donor_inode) || IS_APPEND(donor_inode))
		return -EPERM;

	/* Ext4 move extent does not support swapfile */
	if (IS_SWAPFILE(orig_inode) || IS_SWAPFILE(donor_inode)) {
		ext4_debug("ext4 move extent: The argument files should "
			"not be swapfile [ino:orig %lu, donor %lu]\n",
			orig_inode->i_ino, donor_inode->i_ino);
		return -EBUSY;
	}

	/*
	 * NOTE(review): this rejects only when BOTH files are quota files
	 * (&&).  If the intent is to forbid moving extents of any quota
	 * file, this looks like it should be ||  -- confirm against the
	 * upstream history before changing.
	 */
	if (ext4_is_quota_file(orig_inode) && ext4_is_quota_file(donor_inode)) {
		ext4_debug("ext4 move extent: The argument files should "
			"not be quota files [ino:orig %lu, donor %lu]\n",
			orig_inode->i_ino, donor_inode->i_ino);
		return -EBUSY;
	}

	/* Ext4 move extent supports only extent based file */
	if (!(ext4_test_inode_flag(orig_inode, EXT4_INODE_EXTENTS))) {
		ext4_debug("ext4 move extent: orig file is not extents "
			"based file [ino:orig %lu]\n", orig_inode->i_ino);
		return -EOPNOTSUPP;
	} else if (!(ext4_test_inode_flag(donor_inode, EXT4_INODE_EXTENTS))) {
		ext4_debug("ext4 move extent: donor file is not extents "
			"based file [ino:donor %lu]\n", donor_inode->i_ino);
		return -EOPNOTSUPP;
	}

	if ((!orig_inode->i_size) || (!donor_inode->i_size)) {
		ext4_debug("ext4 move extent: File size is 0 byte\n");
		return -EINVAL;
	}

	/* Start offset should be same */
	if ((orig_start & ~(PAGE_MASK >> orig_inode->i_blkbits)) !=
	    (donor_start & ~(PAGE_MASK >> orig_inode->i_blkbits))) {
		ext4_debug("ext4 move extent: orig and donor's start "
			"offsets are not aligned [ino:orig %lu, donor %lu]\n",
			orig_inode->i_ino, donor_inode->i_ino);
		return -EINVAL;
	}

	if ((orig_start >= EXT_MAX_BLOCKS) ||
	    (donor_start >= EXT_MAX_BLOCKS) ||
	    (*len > EXT_MAX_BLOCKS) ||
	    (donor_start + *len >= EXT_MAX_BLOCKS) ||
	    (orig_start + *len >= EXT_MAX_BLOCKS)) {
		ext4_debug("ext4 move extent: Can't handle over [%u] blocks "
			"[ino:orig %lu, donor %lu]\n", EXT_MAX_BLOCKS,
			orig_inode->i_ino, donor_inode->i_ino);
		return -EINVAL;
	}
	/* Clamp the request to both files' EOF; zero it when the start
	 * lies at or beyond EOF. */
	if (orig_eof <= orig_start)
		*len = 0;
	else if (orig_eof < orig_start + *len - 1)
		*len = orig_eof - orig_start;
	if (donor_eof <= donor_start)
		*len = 0;
	else if (donor_eof < donor_start + *len - 1)
		*len = donor_eof - donor_start;
	if (!*len) {
		ext4_debug("ext4 move extent: len should not be 0 "
			"[ino:orig %lu, donor %lu]\n", orig_inode->i_ino,
			donor_inode->i_ino);
		return -EINVAL;
	}

	return 0;
}
537
538/**
Akira Fujita748de672009-06-17 19:24:03 -0400539 * ext4_move_extents - Exchange the specified range of a file
540 *
541 * @o_filp: file structure of the original file
542 * @d_filp: file structure of the donor file
Xiaoguang Wang65dd8322014-10-11 19:56:34 -0400543 * @orig_blk: start offset in block for orig
544 * @donor_blk: start offset in block for donor
Akira Fujita748de672009-06-17 19:24:03 -0400545 * @len: the number of blocks to be moved
546 * @moved_len: moved block length
547 *
548 * This function returns 0 and moved block length is set in moved_len
549 * if succeed, otherwise returns error value.
550 *
Akira Fujita748de672009-06-17 19:24:03 -0400551 */
552int
Dmitry Monakhovfcf6b1b72014-08-30 23:52:19 -0400553ext4_move_extents(struct file *o_filp, struct file *d_filp, __u64 orig_blk,
554 __u64 donor_blk, __u64 len, __u64 *moved_len)
Akira Fujita748de672009-06-17 19:24:03 -0400555{
Al Viro496ad9a2013-01-23 17:07:38 -0500556 struct inode *orig_inode = file_inode(o_filp);
557 struct inode *donor_inode = file_inode(d_filp);
Dmitry Monakhovfcf6b1b72014-08-30 23:52:19 -0400558 struct ext4_ext_path *path = NULL;
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +0300559 int blocks_per_page = PAGE_SIZE >> orig_inode->i_blkbits;
Dmitry Monakhovfcf6b1b72014-08-30 23:52:19 -0400560 ext4_lblk_t o_end, o_start = orig_blk;
561 ext4_lblk_t d_start = donor_blk;
562 int ret;
Akira Fujita748de672009-06-17 19:24:03 -0400563
Dmitry Monakhov03bd8b92012-09-26 12:32:19 -0400564 if (orig_inode->i_sb != donor_inode->i_sb) {
565 ext4_debug("ext4 move extent: The argument files "
566 "should be in same FS [ino:orig %lu, donor %lu]\n",
567 orig_inode->i_ino, donor_inode->i_ino);
568 return -EINVAL;
569 }
570
571 /* orig and donor should be different inodes */
572 if (orig_inode == donor_inode) {
Theodore Ts'of3ce8062009-09-28 15:58:29 -0400573 ext4_debug("ext4 move extent: The argument files should not "
Dmitry Monakhov03bd8b92012-09-26 12:32:19 -0400574 "be same inode [ino:orig %lu, donor %lu]\n",
Theodore Ts'of3ce8062009-09-28 15:58:29 -0400575 orig_inode->i_ino, donor_inode->i_ino);
576 return -EINVAL;
577 }
578
Akira Fujita7247c0c2010-03-04 00:34:58 -0500579 /* Regular file check */
580 if (!S_ISREG(orig_inode->i_mode) || !S_ISREG(donor_inode->i_mode)) {
581 ext4_debug("ext4 move extent: The argument files should be "
582 "regular file [ino:orig %lu, donor %lu]\n",
583 orig_inode->i_ino, donor_inode->i_ino);
584 return -EINVAL;
585 }
Eric Whitney04e22412015-06-21 21:38:03 -0400586
587 /* TODO: it's not obvious how to swap blocks for inodes with full
588 journaling enabled */
Dmitry Monakhovf0660552012-09-26 12:32:54 -0400589 if (ext4_should_journal_data(orig_inode) ||
590 ext4_should_journal_data(donor_inode)) {
Eric Whitney04e22412015-06-21 21:38:03 -0400591 ext4_msg(orig_inode->i_sb, KERN_ERR,
592 "Online defrag not supported with data journaling");
593 return -EOPNOTSUPP;
Dmitry Monakhovf0660552012-09-26 12:32:54 -0400594 }
Eric Whitney04e22412015-06-21 21:38:03 -0400595
Chandan Rajendra592ddec2018-12-12 15:20:10 +0530596 if (IS_ENCRYPTED(orig_inode) || IS_ENCRYPTED(donor_inode)) {
Eric Whitney14fbd4a2016-08-29 15:45:11 -0400597 ext4_msg(orig_inode->i_sb, KERN_ERR,
598 "Online defrag not supported for encrypted files");
599 return -EOPNOTSUPP;
600 }
601
Akira Fujitafc04cb42009-11-23 07:24:43 -0500602 /* Protect orig and donor inodes against a truncate */
J. Bruce Fields375e2892012-04-18 15:16:33 -0400603 lock_two_nondirectories(orig_inode, donor_inode);
Akira Fujita748de672009-06-17 19:24:03 -0400604
Dmitry Monakhov17335dc2012-09-29 00:41:21 -0400605 /* Wait for all existing dio workers */
Dmitry Monakhov17335dc2012-09-29 00:41:21 -0400606 inode_dio_wait(orig_inode);
607 inode_dio_wait(donor_inode);
608
Akira Fujitafc04cb42009-11-23 07:24:43 -0500609 /* Protect extent tree against block allocations via delalloc */
Dr. Tilmann Bubeck393d1d12013-04-08 12:54:05 -0400610 ext4_double_down_write_data_sem(orig_inode, donor_inode);
Akira Fujita748de672009-06-17 19:24:03 -0400611 /* Check the filesystem environment whether move_extent can be done */
Dmitry Monakhovfcf6b1b72014-08-30 23:52:19 -0400612 ret = mext_check_arguments(orig_inode, donor_inode, orig_blk,
613 donor_blk, &len);
Dmitry Monakhov03bd8b92012-09-26 12:32:19 -0400614 if (ret)
Akira Fujita347fa6f2009-09-16 14:25:07 -0400615 goto out;
Dmitry Monakhovfcf6b1b72014-08-30 23:52:19 -0400616 o_end = o_start + len;
Akira Fujita748de672009-06-17 19:24:03 -0400617
Dmitry Monakhovfcf6b1b72014-08-30 23:52:19 -0400618 while (o_start < o_end) {
619 struct ext4_extent *ex;
620 ext4_lblk_t cur_blk, next_blk;
621 pgoff_t orig_page_index, donor_page_index;
622 int offset_in_page;
623 int unwritten, cur_len;
Akira Fujita748de672009-06-17 19:24:03 -0400624
Dmitry Monakhovfcf6b1b72014-08-30 23:52:19 -0400625 ret = get_ext_path(orig_inode, o_start, &path);
626 if (ret)
Akira Fujita748de672009-06-17 19:24:03 -0400627 goto out;
Dmitry Monakhovfcf6b1b72014-08-30 23:52:19 -0400628 ex = path[path->p_depth].p_ext;
629 next_blk = ext4_ext_next_allocated_block(path);
630 cur_blk = le32_to_cpu(ex->ee_block);
631 cur_len = ext4_ext_get_actual_len(ex);
632 /* Check hole before the start pos */
633 if (cur_blk + cur_len - 1 < o_start) {
634 if (next_blk == EXT_MAX_BLOCKS) {
635 o_start = o_end;
636 ret = -ENODATA;
637 goto out;
638 }
639 d_start += next_blk - o_start;
640 o_start = next_blk;
Theodore Ts'o3bdf14b2014-09-01 14:42:09 -0400641 continue;
Dmitry Monakhovfcf6b1b72014-08-30 23:52:19 -0400642 /* Check hole after the start pos */
643 } else if (cur_blk > o_start) {
644 /* Skip hole */
645 d_start += cur_blk - o_start;
646 o_start = cur_blk;
647 /* Extent inside requested range ?*/
648 if (cur_blk >= o_end)
649 goto out;
650 } else { /* in_range(o_start, o_blk, o_len) */
651 cur_len += cur_blk - o_start;
Akira Fujita748de672009-06-17 19:24:03 -0400652 }
Dmitry Monakhovfcf6b1b72014-08-30 23:52:19 -0400653 unwritten = ext4_ext_is_unwritten(ex);
654 if (o_end - o_start < cur_len)
655 cur_len = o_end - o_start;
Akira Fujita748de672009-06-17 19:24:03 -0400656
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +0300657 orig_page_index = o_start >> (PAGE_SHIFT -
Dmitry Monakhovfcf6b1b72014-08-30 23:52:19 -0400658 orig_inode->i_blkbits);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +0300659 donor_page_index = d_start >> (PAGE_SHIFT -
Dmitry Monakhovfcf6b1b72014-08-30 23:52:19 -0400660 donor_inode->i_blkbits);
661 offset_in_page = o_start % blocks_per_page;
662 if (cur_len > blocks_per_page- offset_in_page)
663 cur_len = blocks_per_page - offset_in_page;
Akira Fujitafc04cb42009-11-23 07:24:43 -0500664 /*
665 * Up semaphore to avoid following problems:
666 * a. transaction deadlock among ext4_journal_start,
667 * ->write_begin via pagefault, and jbd2_journal_commit
668 * b. racing with ->readpage, ->write_begin, and ext4_get_block
669 * in move_extent_per_page
670 */
Dr. Tilmann Bubeck393d1d12013-04-08 12:54:05 -0400671 ext4_double_up_write_data_sem(orig_inode, donor_inode);
Dmitry Monakhovfcf6b1b72014-08-30 23:52:19 -0400672 /* Swap original branches with new branches */
673 move_extent_per_page(o_filp, donor_inode,
674 orig_page_index, donor_page_index,
675 offset_in_page, cur_len,
676 unwritten, &ret);
Dr. Tilmann Bubeck393d1d12013-04-08 12:54:05 -0400677 ext4_double_down_write_data_sem(orig_inode, donor_inode);
Dmitry Monakhov03bd8b92012-09-26 12:32:19 -0400678 if (ret < 0)
Akira Fujitafc04cb42009-11-23 07:24:43 -0500679 break;
Dmitry Monakhovfcf6b1b72014-08-30 23:52:19 -0400680 o_start += cur_len;
681 d_start += cur_len;
Akira Fujita748de672009-06-17 19:24:03 -0400682 }
Dmitry Monakhovfcf6b1b72014-08-30 23:52:19 -0400683 *moved_len = o_start - orig_blk;
684 if (*moved_len > len)
685 *moved_len = len;
686
Akira Fujita748de672009-06-17 19:24:03 -0400687out:
Akira Fujita94d7c162009-11-24 10:19:57 -0500688 if (*moved_len) {
brookxu27bc4462020-08-17 15:36:15 +0800689 ext4_discard_preallocations(orig_inode, 0);
690 ext4_discard_preallocations(donor_inode, 0);
Akira Fujita94d7c162009-11-24 10:19:57 -0500691 }
692
Theodore Ts'ob7ea89a2014-09-01 14:39:09 -0400693 ext4_ext_drop_refs(path);
694 kfree(path);
Dr. Tilmann Bubeck393d1d12013-04-08 12:54:05 -0400695 ext4_double_up_write_data_sem(orig_inode, donor_inode);
J. Bruce Fields375e2892012-04-18 15:16:33 -0400696 unlock_two_nondirectories(orig_inode, donor_inode);
Akira Fujita748de672009-06-17 19:24:03 -0400697
Dmitry Monakhov03bd8b92012-09-26 12:32:19 -0400698 return ret;
Akira Fujita748de672009-06-17 19:24:03 -0400699}