// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2016 Oracle. All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_btree.h"
#include "xfs_refcount_btree.h"
#include "xfs_refcount.h"
#include "xfs_bmap_btree.h"
#include "xfs_trans_space.h"
#include "xfs_bit.h"
#include "xfs_alloc.h"
#include "xfs_quota.h"
#include "xfs_reflink.h"
#include "xfs_iomap.h"
#include "xfs_sb.h"
#include "xfs_ag_resv.h"

/*
 * Copy on Write of Shared Blocks
 *
 * XFS must preserve "the usual" file semantics even when two files share
 * the same physical blocks. This means that a write to one file must not
 * alter the blocks in a different file; the way that we'll do that is
 * through the use of a copy-on-write mechanism. At a high level, that
 * means that when we want to write to a shared block, we allocate a new
 * block, write the data to the new block, and if that succeeds we map the
 * new block into the file.
 *
 * XFS provides a "delayed allocation" mechanism that defers the allocation
 * of disk blocks to dirty-but-not-yet-mapped file blocks as long as
 * possible. This reduces fragmentation by enabling the filesystem to ask
 * for bigger chunks less often, which is exactly what we want for CoW.
 *
 * The delalloc mechanism begins when the kernel wants to make a block
 * writable (write_begin or page_mkwrite). If the offset is not mapped, we
 * create a delalloc mapping, which is a regular in-core extent, but without
 * a real startblock. (For delalloc mappings, the startblock encodes both
 * a flag that this is a delalloc mapping, and a worst-case estimate of how
 * many blocks might be required to put the mapping into the BMBT.) Delalloc
 * mappings are a reservation against the free space in the filesystem;
 * adjacent mappings can also be combined into fewer larger mappings.
 *
 * As an optimization, the CoW extent size hint (cowextsz) creates
 * outsized aligned delalloc reservations in the hope of landing out of
 * order nearby CoW writes in a single extent on disk, thereby reducing
 * fragmentation and improving future performance.
 *
 * D: --RRRRRRSSSRRRRRRRR--- (data fork)
 * C: ------DDDDDDD--------- (CoW fork)
 *
 * When dirty pages are being written out (typically in writepage), the
 * delalloc reservations are converted into unwritten mappings by
 * allocating blocks and replacing the delalloc mapping with real ones.
 * A delalloc mapping can be replaced by several unwritten ones if the
 * free space is fragmented.
 *
 * D: --RRRRRRSSSRRRRRRRR---
 * C: ------UUUUUUU---------
 *
 * We want to adapt the delalloc mechanism for copy-on-write, since the
 * write paths are similar. The first two steps (creating the reservation
 * and allocating the blocks) are exactly the same as delalloc except that
 * the mappings must be stored in a separate CoW fork because we do not want
 * to disturb the mapping in the data fork until we're sure that the write
 * succeeded. IO completion in this case is the process of removing the old
 * mapping from the data fork and moving the new mapping from the CoW fork to
 * the data fork. This will be discussed shortly.
 *
 * For now, unaligned directio writes will be bounced back to the page cache.
 * Block-aligned directio writes will use the same mechanism as buffered
 * writes.
 *
 * Just prior to submitting the actual disk write requests, we convert
 * the extents representing the range of the file actually being written
 * (as opposed to extra pieces created for the cowextsize hint) to real
 * extents. This will become important in the next step:
 *
 * D: --RRRRRRSSSRRRRRRRR---
 * C: ------UUrrUUU---------
 *
 * CoW remapping must be done after the data block write completes,
 * because we don't want to destroy the old data fork map until we're sure
 * the new block has been written. Since the new mappings are kept in a
 * separate fork, we can simply iterate these mappings to find the ones
 * that cover the file blocks that we just CoW'd. For each extent, simply
 * unmap the corresponding range in the data fork, map the new range into
 * the data fork, and remove the extent from the CoW fork. Because of
 * the presence of the cowextsize hint, however, we must be careful
 * only to remap the blocks that we've actually written out -- we must
 * never remap delalloc reservations nor CoW staging blocks that have
 * yet to be written. This corresponds exactly to the real extents in
 * the CoW fork:
 *
 * D: --RRRRRRrrSRRRRRRRR---
 * C: ------UU--UUU---------
 *
 * Since the remapping operation can be applied to an arbitrary file
 * range, we record the need for the remap step as a flag in the ioend
 * instead of declaring a new IO type. This is required for direct io
 * because we only have one ioend for the whole dio, and we have to be able
 * to remember the presence of unwritten blocks and CoW blocks with a single
 * ioend structure. Better yet, the more ground we can cover with one
 * ioend, the better.
 */
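
/*
 * Rough lifecycle sketch of the machinery described above, expressed in
 * terms of the helpers in this file. This is an illustrative summary only;
 * the exact call sites live in the iomap and writeback paths:
 *
 *	write to a shared block
 *	  -> delalloc/unwritten reservation created in the CoW fork
 *	     (xfs_reflink_allocate_cow() allocates staging blocks)
 *	just before submitting the disk write
 *	  -> xfs_reflink_convert_cow() marks the written range XFS_EXT_NORM
 *	I/O completion
 *	  -> xfs_reflink_end_cow() remaps CoW fork blocks into the data fork
 *	write error or leftover speculative reservations
 *	  -> xfs_reflink_cancel_cow_range() frees unused CoW fork extents
 *	mount-time recovery
 *	  -> xfs_reflink_recover_cow() cleans up orphaned CoW staging extents
 */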

/*
 * Given an AG extent, find the lowest-numbered run of shared blocks
 * within that range and return the range in fbno/flen. If
 * find_end_of_shared is true, return the longest contiguous extent of
 * shared blocks. If there are no shared extents, fbno and flen will
 * be set to NULLAGBLOCK and 0, respectively.
 */
int
xfs_reflink_find_shared(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	xfs_agnumber_t		agno,
	xfs_agblock_t		agbno,
	xfs_extlen_t		aglen,
	xfs_agblock_t		*fbno,
	xfs_extlen_t		*flen,
	bool			find_end_of_shared)
{
	struct xfs_buf		*agbp;
	struct xfs_btree_cur	*cur;
	int			error;

	error = xfs_alloc_read_agf(mp, tp, agno, 0, &agbp);
	if (error)
		return error;
	if (!agbp)
		return -ENOMEM;

	cur = xfs_refcountbt_init_cursor(mp, tp, agbp, agno);

	error = xfs_refcount_find_shared(cur, agbno, aglen, fbno, flen,
			find_end_of_shared);

	xfs_btree_del_cursor(cur, error);

	xfs_trans_brelse(tp, agbp);
	return error;
}

/*
 * Trim the mapping to the next block where there's a change in the
 * shared/unshared status. More specifically, this means that we
 * find the lowest-numbered extent of shared blocks that coincides with
 * the given block mapping. If the shared extent overlaps the start of
 * the mapping, trim the mapping to the end of the shared extent. If
 * the shared region intersects the mapping, trim the mapping to the
 * start of the shared extent. If there are no shared regions that
 * overlap, just return the original extent.
 */
int
xfs_reflink_trim_around_shared(
	struct xfs_inode	*ip,
	struct xfs_bmbt_irec	*irec,
	bool			*shared)
{
	xfs_agnumber_t		agno;
	xfs_agblock_t		agbno;
	xfs_extlen_t		aglen;
	xfs_agblock_t		fbno;
	xfs_extlen_t		flen;
	int			error = 0;

	/* Holes, unwritten, and delalloc extents cannot be shared */
	if (!xfs_is_cow_inode(ip) || !xfs_bmap_is_real_extent(irec)) {
		*shared = false;
		return 0;
	}

	trace_xfs_reflink_trim_around_shared(ip, irec);

	agno = XFS_FSB_TO_AGNO(ip->i_mount, irec->br_startblock);
	agbno = XFS_FSB_TO_AGBNO(ip->i_mount, irec->br_startblock);
	aglen = irec->br_blockcount;

	error = xfs_reflink_find_shared(ip->i_mount, NULL, agno, agbno,
			aglen, &fbno, &flen, true);
	if (error)
		return error;

	*shared = false;
	if (fbno == NULLAGBLOCK) {
		/* No shared blocks at all. */
		return 0;
	} else if (fbno == agbno) {
		/*
		 * The start of this extent is shared. Truncate the
		 * mapping at the end of the shared region so that a
		 * subsequent iteration starts at the start of the
		 * unshared region.
		 */
		irec->br_blockcount = flen;
		*shared = true;
		return 0;
	} else {
		/*
		 * There's a shared extent midway through this extent.
		 * Truncate the mapping at the start of the shared
		 * extent so that a subsequent iteration starts at the
		 * start of the shared region.
		 */
		irec->br_blockcount = fbno - agbno;
		return 0;
	}
}
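
/*
 * Worked example of the trimming above (illustrative numbers): suppose
 * @irec covers 20 blocks whose AG block range is [100, 120) and the
 * refcount btree says only [108, 113) is shared. The first call trims
 * @irec to [100, 108) with *shared = false; a caller that then iterates
 * from block 108 gets @irec trimmed to [108, 113) with *shared = true,
 * and so on until the original range is exhausted.
 */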

bool
xfs_inode_need_cow(
	struct xfs_inode	*ip,
	struct xfs_bmbt_irec	*imap,
	bool			*shared)
{
	/* We can't update any real extents in always COW mode. */
	if (xfs_is_always_cow_inode(ip) &&
	    !isnullstartblock(imap->br_startblock)) {
		*shared = true;
		return 0;
	}

	/* Trim the mapping to the nearest shared extent boundary. */
	return xfs_reflink_trim_around_shared(ip, imap, shared);
}

static int
xfs_reflink_convert_cow_locked(
	struct xfs_inode	*ip,
	xfs_fileoff_t		offset_fsb,
	xfs_filblks_t		count_fsb)
{
	struct xfs_iext_cursor	icur;
	struct xfs_bmbt_irec	got;
	struct xfs_btree_cur	*dummy_cur = NULL;
	int			dummy_logflags;
	int			error = 0;

	if (!xfs_iext_lookup_extent(ip, ip->i_cowfp, offset_fsb, &icur, &got))
		return 0;

	do {
		if (got.br_startoff >= offset_fsb + count_fsb)
			break;
		if (got.br_state == XFS_EXT_NORM)
			continue;
		if (WARN_ON_ONCE(isnullstartblock(got.br_startblock)))
			return -EIO;

		xfs_trim_extent(&got, offset_fsb, count_fsb);
		if (!got.br_blockcount)
			continue;

		got.br_state = XFS_EXT_NORM;
		error = xfs_bmap_add_extent_unwritten_real(NULL, ip,
				XFS_COW_FORK, &icur, &dummy_cur, &got,
				&dummy_logflags);
		if (error)
			return error;
	} while (xfs_iext_next_extent(ip->i_cowfp, &icur, &got));

	return error;
}

/* Convert all of the unwritten CoW extents in a file's range to real ones. */
int
xfs_reflink_convert_cow(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_off_t		count)
{
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		offset_fsb = XFS_B_TO_FSBT(mp, offset);
	xfs_fileoff_t		end_fsb = XFS_B_TO_FSB(mp, offset + count);
	xfs_filblks_t		count_fsb = end_fsb - offset_fsb;
	int			error;

	ASSERT(count != 0);

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	error = xfs_reflink_convert_cow_locked(ip, offset_fsb, count_fsb);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}
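
/*
 * Note the rounding above: XFS_B_TO_FSBT rounds the start offset down and
 * XFS_B_TO_FSB rounds the end offset up, so every block that the byte
 * range touches gets converted. For example (illustrative, assuming
 * 4096-byte blocks): offset = 6144 and count = 4096 give offset_fsb = 1
 * and end_fsb = 3, i.e. both partially-covered blocks are included.
 */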

/*
 * Find the extent that maps the given range in the COW fork. Even if the
 * extent is not shared we might have a preallocation for it in the COW
 * fork. If so, use that rather than triggering a new allocation.
 */
static int
xfs_find_trim_cow_extent(
	struct xfs_inode	*ip,
	struct xfs_bmbt_irec	*imap,
	bool			*shared,
	bool			*found)
{
	xfs_fileoff_t		offset_fsb = imap->br_startoff;
	xfs_filblks_t		count_fsb = imap->br_blockcount;
	struct xfs_iext_cursor	icur;
	struct xfs_bmbt_irec	got;

	*found = false;

	/*
	 * If we don't find an overlapping extent, trim the range we need to
	 * allocate to fit the hole we found.
	 */
	if (!xfs_iext_lookup_extent(ip, ip->i_cowfp, offset_fsb, &icur, &got))
		got.br_startoff = offset_fsb + count_fsb;
	if (got.br_startoff > offset_fsb) {
		xfs_trim_extent(imap, imap->br_startoff,
				got.br_startoff - imap->br_startoff);
		return xfs_inode_need_cow(ip, imap, shared);
	}

	*shared = true;
	if (isnullstartblock(got.br_startblock)) {
		xfs_trim_extent(imap, got.br_startoff, got.br_blockcount);
		return 0;
	}

	/* real extent found - no need to allocate */
	xfs_trim_extent(&got, offset_fsb, count_fsb);
	*imap = got;
	*found = true;
	return 0;
}
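
/*
 * Summary of the three outcomes above (illustrative): if the COW fork has
 * nothing covering the start of @imap, the mapping is trimmed to the hole
 * and the shared/unshared check decides whether an allocation is needed;
 * if a delalloc COW reservation covers it, @imap is trimmed to that
 * reservation and the caller must allocate real blocks for it; if a real
 * COW extent covers it, *found is set and no allocation is needed at all.
 */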

/* Allocate all CoW reservations covering a range of blocks in a file. */
int
xfs_reflink_allocate_cow(
	struct xfs_inode	*ip,
	struct xfs_bmbt_irec	*imap,
	bool			*shared,
	uint			*lockmode,
	bool			convert_now)
{
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		offset_fsb = imap->br_startoff;
	xfs_filblks_t		count_fsb = imap->br_blockcount;
	struct xfs_trans	*tp;
	int			nimaps, error = 0;
	bool			found;
	xfs_filblks_t		resaligned;
	xfs_extlen_t		resblks = 0;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	if (!ip->i_cowfp) {
		ASSERT(!xfs_is_reflink_inode(ip));
		xfs_ifork_init_cow(ip);
	}

	error = xfs_find_trim_cow_extent(ip, imap, shared, &found);
	if (error || !*shared)
		return error;
	if (found)
		goto convert;

	resaligned = xfs_aligned_fsb_count(imap->br_startoff,
		imap->br_blockcount, xfs_get_cowextsz_hint(ip));
	resblks = XFS_DIOSTRAT_SPACE_RES(mp, resaligned);

	xfs_iunlock(ip, *lockmode);
	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, 0, &tp);
	*lockmode = XFS_ILOCK_EXCL;
	xfs_ilock(ip, *lockmode);

	if (error)
		return error;

	error = xfs_qm_dqattach_locked(ip, false);
	if (error)
		goto out_trans_cancel;

	/*
	 * Check for an overlapping extent again now that we dropped the ilock.
	 */
	error = xfs_find_trim_cow_extent(ip, imap, shared, &found);
	if (error || !*shared)
		goto out_trans_cancel;
	if (found) {
		xfs_trans_cancel(tp);
		goto convert;
	}

	error = xfs_trans_reserve_quota_nblks(tp, ip, resblks, 0,
			XFS_QMOPT_RES_REGBLKS);
	if (error)
		goto out_trans_cancel;

	xfs_trans_ijoin(tp, ip, 0);

	/* Allocate the entire reservation as unwritten blocks. */
	nimaps = 1;
	error = xfs_bmapi_write(tp, ip, imap->br_startoff, imap->br_blockcount,
			XFS_BMAPI_COWFORK | XFS_BMAPI_PREALLOC,
			resblks, imap, &nimaps);
	if (error)
		goto out_unreserve;

	xfs_inode_set_cowblocks_tag(ip);
	error = xfs_trans_commit(tp);
	if (error)
		return error;

	/*
	 * Allocation succeeded but the requested range was not even partially
	 * satisfied? Bail out!
	 */
	if (nimaps == 0)
		return -ENOSPC;
convert:
	xfs_trim_extent(imap, offset_fsb, count_fsb);
	/*
	 * COW fork extents are supposed to remain unwritten until we're ready
	 * to initiate a disk write. For direct I/O we are going to write the
	 * data and need the conversion, but for buffered writes we're done.
	 */
	if (!convert_now || imap->br_state == XFS_EXT_NORM)
		return 0;
	trace_xfs_reflink_convert_cow(ip, imap);
	return xfs_reflink_convert_cow_locked(ip, offset_fsb, count_fsb);

out_unreserve:
	xfs_trans_unreserve_quota_nblks(tp, ip, (long)resblks, 0,
			XFS_QMOPT_RES_REGBLKS);
out_trans_cancel:
	xfs_trans_cancel(tp);
	return error;
}
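
/*
 * Example of the reservation sizing above (illustrative numbers, assuming
 * the usual xfs_aligned_fsb_count() round-down/round-up behaviour): with a
 * cowextsz hint of 16 blocks, a request for 3 blocks starting at file
 * block 5 is widened to the aligned 16-block window covering it, so
 * resaligned = 16 and the transaction reserves
 * XFS_DIOSTRAT_SPACE_RES(mp, 16) blocks for the worst-case bmbt growth.
 */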

/*
 * Cancel CoW reservations for some block range of an inode.
 *
 * If cancel_real is true this function cancels all COW fork extents for the
 * inode; if cancel_real is false, real extents are not cleared.
 *
 * Caller must have already joined the inode to the current transaction. The
 * inode will be joined to the transaction returned to the caller.
 */
int
xfs_reflink_cancel_cow_blocks(
	struct xfs_inode		*ip,
	struct xfs_trans		**tpp,
	xfs_fileoff_t			offset_fsb,
	xfs_fileoff_t			end_fsb,
	bool				cancel_real)
{
	struct xfs_ifork		*ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK);
	struct xfs_bmbt_irec		got, del;
	struct xfs_iext_cursor		icur;
	int				error = 0;

	if (!xfs_inode_has_cow_data(ip))
		return 0;
	if (!xfs_iext_lookup_extent_before(ip, ifp, &end_fsb, &icur, &got))
		return 0;

	/* Walk backwards until we're out of the I/O range... */
	while (got.br_startoff + got.br_blockcount > offset_fsb) {
		del = got;
		xfs_trim_extent(&del, offset_fsb, end_fsb - offset_fsb);

		/* Extent delete may have bumped ext forward */
		if (!del.br_blockcount) {
			xfs_iext_prev(ifp, &icur);
			goto next_extent;
		}

		trace_xfs_reflink_cancel_cow(ip, &del);

		if (isnullstartblock(del.br_startblock)) {
			error = xfs_bmap_del_extent_delay(ip, XFS_COW_FORK,
					&icur, &got, &del);
			if (error)
				break;
		} else if (del.br_state == XFS_EXT_UNWRITTEN || cancel_real) {
			ASSERT((*tpp)->t_firstblock == NULLFSBLOCK);

			/* Free the CoW orphan record. */
			error = xfs_refcount_free_cow_extent(*tpp,
					del.br_startblock, del.br_blockcount);
			if (error)
				break;

			xfs_bmap_add_free(*tpp, del.br_startblock,
					del.br_blockcount, NULL);

			/* Roll the transaction */
			error = xfs_defer_finish(tpp);
			if (error)
				break;

			/* Remove the mapping from the CoW fork. */
			xfs_bmap_del_extent_cow(ip, &icur, &got, &del);

			/* Remove the quota reservation */
			error = xfs_trans_reserve_quota_nblks(NULL, ip,
					-(long)del.br_blockcount, 0,
					XFS_QMOPT_RES_REGBLKS);
			if (error)
				break;
		} else {
			/* Didn't do anything, push cursor back. */
			xfs_iext_prev(ifp, &icur);
		}
next_extent:
		if (!xfs_iext_get_extent(ifp, &icur, &got))
			break;
	}

	/* clear tag if cow fork is emptied */
	if (!ifp->if_bytes)
		xfs_inode_clear_cowblocks_tag(ip);
	return error;
}

/*
 * Cancel CoW reservations for some byte range of an inode.
 *
 * If cancel_real is true this function cancels all COW fork extents for the
 * inode; if cancel_real is false, real extents are not cleared.
 */
int
xfs_reflink_cancel_cow_range(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_off_t		count,
	bool			cancel_real)
{
	struct xfs_trans	*tp;
	xfs_fileoff_t		offset_fsb;
	xfs_fileoff_t		end_fsb;
	int			error;

	trace_xfs_reflink_cancel_cow_range(ip, offset, count);
	ASSERT(ip->i_cowfp);

	offset_fsb = XFS_B_TO_FSBT(ip->i_mount, offset);
	if (count == NULLFILEOFF)
		end_fsb = NULLFILEOFF;
	else
		end_fsb = XFS_B_TO_FSB(ip->i_mount, offset + count);

	/* Start a rolling transaction to remove the mappings */
	error = xfs_trans_alloc(ip->i_mount, &M_RES(ip->i_mount)->tr_write,
			0, 0, 0, &tp);
	if (error)
		goto out;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, 0);

	/* Scrape out the old CoW reservations */
	error = xfs_reflink_cancel_cow_blocks(ip, &tp, offset_fsb, end_fsb,
			cancel_real);
	if (error)
		goto out_cancel;

	error = xfs_trans_commit(tp);

	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;

out_cancel:
	xfs_trans_cancel(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
out:
	trace_xfs_reflink_cancel_cow_range_error(ip, error, _RET_IP_);
	return error;
}

/*
 * Remap part of the CoW fork into the data fork.
 *
 * We aim to remap the range starting at @offset_fsb and ending at @end_fsb
 * into the data fork; this function will remap what it can (at the end of the
 * range) and update @end_fsb appropriately. Each remap gets its own
 * transaction because we can end up merging and splitting bmbt blocks for
 * every remap operation and we'd like to keep the block reservation
 * requirements as low as possible.
 */
STATIC int
xfs_reflink_end_cow_extent(
	struct xfs_inode	*ip,
	xfs_fileoff_t		offset_fsb,
	xfs_fileoff_t		*end_fsb)
{
	struct xfs_bmbt_irec	got, del;
	struct xfs_iext_cursor	icur;
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK);
	xfs_filblks_t		rlen;
	unsigned int		resblks;
	int			error;

	/* No COW extents? That's easy! */
	if (ifp->if_bytes == 0) {
		*end_fsb = offset_fsb;
		return 0;
	}

	resblks = XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK);
	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0,
			XFS_TRANS_RESERVE, &tp);
	if (error)
		return error;

	/*
	 * Lock the inode. We have to ijoin without automatic unlock because
	 * the lead transaction is the refcountbt record deletion; the data
	 * fork update follows as a deferred log item.
	 */
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, 0);

	/*
	 * In case of racing, overlapping AIO writes, no COW extents might be
	 * left by the time I/O completes for the loser of the race. In that
	 * case we are done.
	 */
	if (!xfs_iext_lookup_extent_before(ip, ifp, end_fsb, &icur, &got) ||
	    got.br_startoff + got.br_blockcount <= offset_fsb) {
		*end_fsb = offset_fsb;
		goto out_cancel;
	}

	/*
	 * Structure copy @got into @del, then trim @del to the range that we
	 * were asked to remap. We preserve @got for the eventual CoW fork
	 * deletion; from now on @del represents the mapping that we're
	 * actually remapping.
	 */
	del = got;
	xfs_trim_extent(&del, offset_fsb, *end_fsb - offset_fsb);

	ASSERT(del.br_blockcount > 0);

	/*
	 * Only remap real extents that contain data. With AIO, speculative
	 * preallocations can leak into the range we are called upon, and we
	 * need to skip them.
	 */
	if (!xfs_bmap_is_real_extent(&got)) {
		*end_fsb = del.br_startoff;
		goto out_cancel;
	}

	/* Unmap the old blocks in the data fork. */
	rlen = del.br_blockcount;
	error = __xfs_bunmapi(tp, ip, del.br_startoff, &rlen, 0, 1);
	if (error)
		goto out_cancel;

	/* Trim the extent to whatever got unmapped. */
	xfs_trim_extent(&del, del.br_startoff + rlen, del.br_blockcount - rlen);
	trace_xfs_reflink_cow_remap(ip, &del);

	/* Free the CoW orphan record. */
	error = xfs_refcount_free_cow_extent(tp, del.br_startblock,
			del.br_blockcount);
	if (error)
		goto out_cancel;

	/* Map the new blocks into the data fork. */
	error = xfs_bmap_map_extent(tp, ip, &del);
	if (error)
		goto out_cancel;

	/* Charge this new data fork mapping to the on-disk quota. */
	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_DELBCOUNT,
			(long)del.br_blockcount);

	/* Remove the mapping from the CoW fork. */
	xfs_bmap_del_extent_cow(ip, &icur, &got, &del);

	error = xfs_trans_commit(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	if (error)
		return error;

	/* Update the caller about how much progress we made. */
	*end_fsb = del.br_startoff;
	return 0;

out_cancel:
	xfs_trans_cancel(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}
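
/*
 * Worked example of the backwards walk (illustrative numbers): to remap
 * file blocks [0, 8) with real CoW extents at [0, 3) and [5, 8), the
 * caller's loop first invokes this function with *end_fsb = 8; it remaps
 * [5, 8) and sets *end_fsb = 5. The next call remaps [0, 3) and sets
 * *end_fsb = 0, which terminates the loop in xfs_reflink_end_cow().
 */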

/*
 * Remap parts of a file's data fork after a successful CoW.
 */
int
xfs_reflink_end_cow(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_off_t		count)
{
	xfs_fileoff_t		offset_fsb;
	xfs_fileoff_t		end_fsb;
	int			error = 0;

	trace_xfs_reflink_end_cow(ip, offset, count);

	offset_fsb = XFS_B_TO_FSBT(ip->i_mount, offset);
	end_fsb = XFS_B_TO_FSB(ip->i_mount, offset + count);

	/*
	 * Walk backwards until we're out of the I/O range. The loop function
	 * repeatedly cycles the ILOCK to allocate one transaction per remapped
	 * extent.
	 *
	 * If we're being called by writeback then the pages will still
	 * have PageWriteback set, which prevents races with reflink remapping
	 * and truncate. Reflink remapping prevents races with writeback by
	 * taking the iolock and mmaplock before flushing the pages and
	 * remapping, which means there won't be any further writeback or page
	 * cache dirtying until the reflink completes.
	 *
	 * We should never have two threads issuing writeback for the same file
	 * region. There are also post-eof checks in the writeback
	 * preparation code so that we don't bother writing out pages that are
	 * about to be truncated.
	 *
	 * If we're being called as part of directio write completion, the dio
	 * count is still elevated, which reflink and truncate will wait for.
	 * Reflink remapping takes the iolock and mmaplock and waits for
	 * pending dio to finish, which should prevent any directio until the
	 * remap completes. Multiple concurrent directio writes to the same
	 * region are handled by end_cow processing only occurring for the
	 * threads which succeed; the outcome of multiple overlapping direct
	 * writes is not well defined anyway.
	 *
	 * It's possible that a buffered write and a direct write could collide
	 * here (the buffered write stumbles in after the dio flushes and
	 * invalidates the page cache and immediately queues writeback), but we
	 * have never supported this 100%. If either disk write succeeds the
	 * blocks will be remapped.
	 */
	while (end_fsb > offset_fsb && !error)
		error = xfs_reflink_end_cow_extent(ip, offset_fsb, &end_fsb);

	if (error)
		trace_xfs_reflink_end_cow_error(ip, error, _RET_IP_);
	return error;
}

/*
 * Free leftover CoW reservations that didn't get cleaned out.
 */
int
xfs_reflink_recover_cow(
	struct xfs_mount	*mp)
{
	xfs_agnumber_t		agno;
	int			error = 0;

	if (!xfs_sb_version_hasreflink(&mp->m_sb))
		return 0;

	for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
		error = xfs_refcount_recover_cow_leftovers(mp, agno);
		if (error)
			break;
	}

	return error;
}

/*
 * Reflinking (Block) Ranges of Two Files Together
 *
 * First, ensure that the reflink flag is set on both inodes. The flag is an
 * optimization to avoid unnecessary refcount btree lookups in the write path.
 *
 * Now we can iteratively remap the range of extents (and holes) in src to the
 * corresponding ranges in dest. Let drange and srange denote the ranges of
 * logical blocks in dest and src touched by the reflink operation.
 *
 * While the length of drange is greater than zero,
 *    - Read src's bmbt at the start of srange ("imap")
 *    - If imap doesn't exist, make imap appear to start at the end of srange
 *      with zero length.
 *    - If imap starts before srange, advance imap to start at srange.
 *    - If imap goes beyond srange, truncate imap to end at the end of srange.
 *    - Punch (imap start - srange start + imap len) blocks from dest at
 *      offset (drange start).
 *    - If imap points to a real range of pblks,
 *         > Increase the refcount of the imap's pblks
 *         > Map imap's pblks into dest at the offset
 *           (drange start + imap start - srange start)
 *    - Advance drange and srange by (imap start - srange start + imap len)
 *
 * Finally, if the reflink made dest longer, update both the in-core and
 * on-disk file sizes.
 *
 * ASCII Art Demonstration:
 *
 * Let's say we want to reflink this source file:
 *
 * ----SSSSSSS-SSSSS----SSSSSS (src file)
 *     <-------------------->
 *
 * into this destination file:
 *
 * --DDDDDDDDDDDDDDDDDDD--DDD (dest file)
 *    <-------------------->
 * '-' means a hole, and 'S' and 'D' are written blocks in the src and dest.
 * Observe that the range has different logical offsets in either file.
 *
 * Consider that the first extent in the source file doesn't line up with our
 * reflink range. Unmapping and remapping are separate operations, so we can
 * unmap more blocks from the destination file than we remap.
 *
 * ----SSSSSSS-SSSSS----SSSSSS
 *     <------->
 * --DDDDD---------DDDDD--DDD
 *    <------->
 *
 * Now remap the source extent into the destination file:
 *
 * ----SSSSSSS-SSSSS----SSSSSS
 *     <------->
 * --DDDDD--SSSSSSSDDDDD--DDD
 *    <------->
 *
 * Do likewise with the second hole and extent in our range. Holes in the
 * unmap range don't affect our operation.
 *
 * ----SSSSSSS-SSSSS----SSSSSS
 *             <---->
 * --DDDDD--SSSSSSS-SSSSS-DDD
 *            <---->
 *
 * Finally, unmap and remap part of the third extent. This will increase the
 * size of the destination file.
 *
 * ----SSSSSSS-SSSSS----SSSSSS
 *                   <----->
 * --DDDDD--SSSSSSS-SSSSS----SSS
 *                  <----->
 *
 * Once we update the destination file's i_size, we're done.
 */

/*
 * Ensure the reflink bit is set in both inodes.
 */
STATIC int
xfs_reflink_set_inode_flag(
	struct xfs_inode	*src,
	struct xfs_inode	*dest)
{
	struct xfs_mount	*mp = src->i_mount;
	int			error;
	struct xfs_trans	*tp;

	if (xfs_is_reflink_inode(src) && xfs_is_reflink_inode(dest))
		return 0;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ichange, 0, 0, 0, &tp);
	if (error)
		goto out_error;

	/* Lock both files against IO */
	if (src->i_ino == dest->i_ino)
		xfs_ilock(src, XFS_ILOCK_EXCL);
	else
		xfs_lock_two_inodes(src, XFS_ILOCK_EXCL, dest, XFS_ILOCK_EXCL);

	if (!xfs_is_reflink_inode(src)) {
		trace_xfs_reflink_set_inode_flag(src);
		xfs_trans_ijoin(tp, src, XFS_ILOCK_EXCL);
		src->i_d.di_flags2 |= XFS_DIFLAG2_REFLINK;
		xfs_trans_log_inode(tp, src, XFS_ILOG_CORE);
		xfs_ifork_init_cow(src);
	} else
		xfs_iunlock(src, XFS_ILOCK_EXCL);

	if (src->i_ino == dest->i_ino)
		goto commit_flags;

	if (!xfs_is_reflink_inode(dest)) {
		trace_xfs_reflink_set_inode_flag(dest);
		xfs_trans_ijoin(tp, dest, XFS_ILOCK_EXCL);
		dest->i_d.di_flags2 |= XFS_DIFLAG2_REFLINK;
		xfs_trans_log_inode(tp, dest, XFS_ILOG_CORE);
		xfs_ifork_init_cow(dest);
	} else
		xfs_iunlock(dest, XFS_ILOCK_EXCL);

commit_flags:
	error = xfs_trans_commit(tp);
	if (error)
		goto out_error;
	return error;

out_error:
	trace_xfs_reflink_set_inode_flag_error(dest, error, _RET_IP_);
	return error;
}

/*
 * Update destination inode size & cowextsize hint, if necessary.
 */
int
xfs_reflink_update_dest(
	struct xfs_inode	*dest,
	xfs_off_t		newlen,
	xfs_extlen_t		cowextsize,
	unsigned int		remap_flags)
{
	struct xfs_mount	*mp = dest->i_mount;
	struct xfs_trans	*tp;
	int			error;

	if (newlen <= i_size_read(VFS_I(dest)) && cowextsize == 0)
		return 0;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ichange, 0, 0, 0, &tp);
	if (error)
		goto out_error;

	xfs_ilock(dest, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, dest, XFS_ILOCK_EXCL);

	if (newlen > i_size_read(VFS_I(dest))) {
		trace_xfs_reflink_update_inode_size(dest, newlen);
		i_size_write(VFS_I(dest), newlen);
		dest->i_d.di_size = newlen;
	}

	if (cowextsize) {
		dest->i_d.di_cowextsize = cowextsize;
		dest->i_d.di_flags2 |= XFS_DIFLAG2_COWEXTSIZE;
	}

	xfs_trans_log_inode(tp, dest, XFS_ILOG_CORE);

	error = xfs_trans_commit(tp);
	if (error)
		goto out_error;
	return error;

out_error:
	trace_xfs_reflink_update_inode_size_error(dest, error, _RET_IP_);
	return error;
}

/*
 * Do we have enough reserve in this AG to handle a reflink? The refcount
 * btree already reserved all the space it needs, but the rmap btree can grow
 * infinitely, so we won't allow more reflinks when the AG is down to the
 * btree reserves.
 */
static int
xfs_reflink_ag_has_free_space(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno)
{
	struct xfs_perag	*pag;
	int			error = 0;

	if (!xfs_sb_version_hasrmapbt(&mp->m_sb))
		return 0;

	pag = xfs_perag_get(mp, agno);
	if (xfs_ag_resv_critical(pag, XFS_AG_RESV_RMAPBT) ||
	    xfs_ag_resv_critical(pag, XFS_AG_RESV_METADATA))
		error = -ENOSPC;
	xfs_perag_put(pag);
	return error;
}

/*
 * Unmap a range of blocks from a file, then map other blocks into the hole.
 * The range to unmap is (destoff : irec->br_startoff + irec->br_blockcount).
 * The extent irec is mapped into dest at irec->br_startoff.
 */
STATIC int
xfs_reflink_remap_extent(
	struct xfs_inode	*ip,
	struct xfs_bmbt_irec	*irec,
	xfs_fileoff_t		destoff,
	xfs_off_t		new_isize)
{
	struct xfs_mount	*mp = ip->i_mount;
	bool			real_extent = xfs_bmap_is_real_extent(irec);
	struct xfs_trans	*tp;
	unsigned int		resblks;
	struct xfs_bmbt_irec	uirec;
	xfs_filblks_t		rlen;
	xfs_filblks_t		unmap_len;
	xfs_off_t		newlen;
	int			error;

	unmap_len = irec->br_startoff + irec->br_blockcount - destoff;
	trace_xfs_reflink_punch_range(ip, destoff, unmap_len);

	/* No reflinking if we're low on space */
	if (real_extent) {
		error = xfs_reflink_ag_has_free_space(mp,
				XFS_FSB_TO_AGNO(mp, irec->br_startblock));
		if (error)
			goto out;
	}

	/* Start a rolling transaction to switch the mappings */
	resblks = XFS_EXTENTADD_SPACE_RES(ip->i_mount, XFS_DATA_FORK);
	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, 0, &tp);
	if (error)
		goto out;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, 0);

	/* If we're not just clearing space, then do we have enough quota? */
	if (real_extent) {
		error = xfs_trans_reserve_quota_nblks(tp, ip,
				irec->br_blockcount, 0, XFS_QMOPT_RES_REGBLKS);
		if (error)
			goto out_cancel;
	}

	trace_xfs_reflink_remap(ip, irec->br_startoff,
			irec->br_blockcount, irec->br_startblock);

	/* Unmap the old blocks in the data fork. */
	rlen = unmap_len;
	while (rlen) {
		ASSERT(tp->t_firstblock == NULLFSBLOCK);
		error = __xfs_bunmapi(tp, ip, destoff, &rlen, 0, 1);
		if (error)
			goto out_cancel;

		/*
		 * Trim the extent to whatever got unmapped.
		 * Remember, bunmapi works backwards.
		 */
		uirec.br_startblock = irec->br_startblock + rlen;
		uirec.br_startoff = irec->br_startoff + rlen;
		uirec.br_blockcount = unmap_len - rlen;
		unmap_len = rlen;

		/* If this isn't a real mapping, we're done. */
		if (!real_extent || uirec.br_blockcount == 0)
			goto next_extent;

		trace_xfs_reflink_remap(ip, uirec.br_startoff,
				uirec.br_blockcount, uirec.br_startblock);

		/* Update the refcount tree */
		error = xfs_refcount_increase_extent(tp, &uirec);
		if (error)
			goto out_cancel;

		/* Map the new blocks into the data fork. */
		error = xfs_bmap_map_extent(tp, ip, &uirec);
		if (error)
			goto out_cancel;

		/* Update quota accounting. */
		xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT,
				uirec.br_blockcount);

		/* Update dest isize if needed. */
		newlen = XFS_FSB_TO_B(mp,
				uirec.br_startoff + uirec.br_blockcount);
		newlen = min_t(xfs_off_t, newlen, new_isize);
		if (newlen > i_size_read(VFS_I(ip))) {
			trace_xfs_reflink_update_inode_size(ip, newlen);
			i_size_write(VFS_I(ip), newlen);
			ip->i_d.di_size = newlen;
			xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
		}

next_extent:
		/* Process all the deferred stuff. */
		error = xfs_defer_finish(&tp);
		if (error)
			goto out_cancel;
	}

	error = xfs_trans_commit(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	if (error)
		goto out;
	return 0;

out_cancel:
	xfs_trans_cancel(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
out:
	trace_xfs_reflink_remap_extent_error(ip, error, _RET_IP_);
	return error;
}
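
/*
 * Example of the backwards unmap/remap loop above (illustrative numbers):
 * with unmap_len = 10 and __xfs_bunmapi() able to unmap only the last 4
 * blocks in one pass, rlen comes back as 6, so uirec covers the 4 blocks
 * at offset irec->br_startoff + 6; those blocks are refcounted and mapped
 * in, and the next pass retries the remaining 6 blocks.
 */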

/*
 * Iteratively remap one file's extents (and holes) to another's.
 */
int
xfs_reflink_remap_blocks(
	struct xfs_inode	*src,
	loff_t			pos_in,
	struct xfs_inode	*dest,
	loff_t			pos_out,
	loff_t			remap_len,
	loff_t			*remapped)
{
	struct xfs_bmbt_irec	imap;
	xfs_fileoff_t		srcoff;
	xfs_fileoff_t		destoff;
	xfs_filblks_t		len;
	xfs_filblks_t		range_len;
	xfs_filblks_t		remapped_len = 0;
	xfs_off_t		new_isize = pos_out + remap_len;
	int			nimaps;
	int			error = 0;

	destoff = XFS_B_TO_FSBT(src->i_mount, pos_out);
	srcoff = XFS_B_TO_FSBT(src->i_mount, pos_in);
	len = XFS_B_TO_FSB(src->i_mount, remap_len);

	/* drange = (destoff, destoff + len); srange = (srcoff, srcoff + len) */
	while (len) {
		uint		lock_mode;

		trace_xfs_reflink_remap_blocks_loop(src, srcoff, len,
				dest, destoff);

		/* Read extent from the source file */
		nimaps = 1;
		lock_mode = xfs_ilock_data_map_shared(src);
		error = xfs_bmapi_read(src, srcoff, len, &imap, &nimaps, 0);
		xfs_iunlock(src, lock_mode);
		if (error)
			break;
		ASSERT(nimaps == 1);

		trace_xfs_reflink_remap_imap(src, srcoff, len, XFS_DATA_FORK,
				&imap);

		/* Translate imap into the destination file. */
		range_len = imap.br_startoff + imap.br_blockcount - srcoff;
		imap.br_startoff += destoff - srcoff;

		/* Clear dest from destoff to the end of imap and map it in. */
		error = xfs_reflink_remap_extent(dest, &imap, destoff,
				new_isize);
		if (error)
			break;

		if (fatal_signal_pending(current)) {
			error = -EINTR;
			break;
		}

		/* Advance drange/srange */
		srcoff += range_len;
		destoff += range_len;
		len -= range_len;
		remapped_len += range_len;
	}

	if (error)
		trace_xfs_reflink_remap_blocks_error(dest, error, _RET_IP_);
	*remapped = min_t(loff_t, remap_len,
			XFS_FSB_TO_B(src->i_mount, remapped_len));
	return error;
}
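
/*
 * Example of one loop iteration above (illustrative numbers): with
 * srcoff = 20, destoff = 100 and a source extent imap = [20, 30), the
 * translation gives range_len = 10 and shifts imap.br_startoff to 100,
 * so blocks [100, 110) of the destination are punched out and remapped;
 * all three cursors then advance by 10 blocks.
 */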

/*
 * Grab the exclusive iolock for a data copy from src to dest, making
 * sure to abide vfs locking order (lowest pointer value goes first) and
 * breaking the pnfs layout leases on dest before proceeding. The loop
 * is needed because we cannot call the blocking break_layout() with the
 * src iolock held, and therefore have to back out both locks.
 */
static int
xfs_iolock_two_inodes_and_break_layout(
	struct inode		*src,
	struct inode		*dest)
{
	int			error;

retry:
	if (src < dest) {
		inode_lock_shared(src);
		inode_lock_nested(dest, I_MUTEX_NONDIR2);
	} else {
		/* src >= dest */
		inode_lock(dest);
	}

	error = break_layout(dest, false);
	if (error == -EWOULDBLOCK) {
		inode_unlock(dest);
		if (src < dest)
			inode_unlock_shared(src);
		error = break_layout(dest, true);
		if (error)
			return error;
		goto retry;
	}
	if (error) {
		inode_unlock(dest);
		if (src < dest)
			inode_unlock_shared(src);
		return error;
	}
	if (src > dest)
		inode_lock_shared_nested(src, I_MUTEX_NONDIR2);
	return 0;
}

/* Unlock both inodes after they've been prepped for a range clone. */
void
xfs_reflink_remap_unlock(
	struct file		*file_in,
	struct file		*file_out)
{
	struct inode		*inode_in = file_inode(file_in);
	struct xfs_inode	*src = XFS_I(inode_in);
	struct inode		*inode_out = file_inode(file_out);
	struct xfs_inode	*dest = XFS_I(inode_out);
	bool			same_inode = (inode_in == inode_out);

	xfs_iunlock(dest, XFS_MMAPLOCK_EXCL);
	if (!same_inode)
		xfs_iunlock(src, XFS_MMAPLOCK_SHARED);
	inode_unlock(inode_out);
	if (!same_inode)
		inode_unlock_shared(inode_in);
}

/*
 * If we're reflinking to a point past the destination file's EOF, we must
 * zero any speculative post-EOF preallocations that sit between the old EOF
 * and the destination file offset.
 */
static int
xfs_reflink_zero_posteof(
	struct xfs_inode	*ip,
	loff_t			pos)
{
	loff_t			isize = i_size_read(VFS_I(ip));

	if (pos <= isize)
		return 0;

	trace_xfs_zero_eof(ip, isize, pos - isize);
	return iomap_zero_range(VFS_I(ip), isize, pos - isize, NULL,
			&xfs_iomap_ops);
}
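
/*
 * For example (illustrative numbers): with i_size = 10000 and a clone
 * destination offset of 16384, the byte range [10000, 16384) is zeroed
 * via iomap_zero_range() so that stale data in speculative preallocations
 * between the old EOF and the new mapping is never exposed.
 */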
1275
1276/*
Darrick J. Wong0d41e1d2018-10-05 19:04:22 +10001277 * Prepare two files for range cloning. Upon a successful return both inodes
Dave Chinnerb3998902018-10-06 11:44:39 +10001278 * will have the iolock and mmaplock held, the page cache of the out file will
1279 * be truncated, and any leases on the out file will have been broken. This
1280 * function borrows heavily from xfs_file_aio_write_checks.
Dave Chinnerdceeb472018-10-06 11:44:19 +10001281 *
1282 * The VFS allows partial EOF blocks to "match" for dedupe even though it hasn't
1283 * checked that the bytes beyond EOF physically match. Hence we cannot use the
1284 * EOF block in the source dedupe range because it's not a complete block match,
Dave Chinnerb3998902018-10-06 11:44:39 +10001285 * hence can introduce a corruption into the file that has it's block replaced.
Dave Chinnerdceeb472018-10-06 11:44:19 +10001286 *
Dave Chinnerb3998902018-10-06 11:44:39 +10001287 * In similar fashion, the VFS file cloning also allows partial EOF blocks to be
1288 * "block aligned" for the purposes of cloning entire files. However, if the
1289 * source file range includes the EOF block and it lands within the existing EOF
1290 * of the destination file, then we can expose stale data from beyond the source
1291 * file EOF in the destination file.
1292 *
1293 * XFS doesn't support partial block sharing, so in both cases we have check
1294 * these cases ourselves. For dedupe, we can simply round the length to dedupe
1295 * down to the previous whole block and ignore the partial EOF block. While this
1296 * means we can't dedupe the last block of a file, this is an acceptible
1297 * tradeoff for simplicity on implementation.
1298 *
1299 * For cloning, we want to share the partial EOF block if it is also the new EOF
1300 * block of the destination file. If the partial EOF block lies inside the
1301 * existing destination EOF, then we have to abort the clone to avoid exposing
1302 * stale data in the destination file. Hence we reject these clone attempts with
1303 * -EINVAL in this case.
Darrick J. Wong862bb362016-10-03 09:11:40 -07001304 */
int
xfs_reflink_remap_prep(
	struct file		*file_in,
	loff_t			pos_in,
	struct file		*file_out,
	loff_t			pos_out,
	loff_t			*len,
	unsigned int		remap_flags)
{
	struct inode		*inode_in = file_inode(file_in);
	struct xfs_inode	*src = XFS_I(inode_in);
	struct inode		*inode_out = file_inode(file_out);
	struct xfs_inode	*dest = XFS_I(inode_out);
	bool			same_inode = (inode_in == inode_out);
	ssize_t			ret;

	/* Lock both files against IO */
	ret = xfs_iolock_two_inodes_and_break_layout(inode_in, inode_out);
	if (ret)
		return ret;
	if (same_inode)
		xfs_ilock(src, XFS_MMAPLOCK_EXCL);
	else
		xfs_lock_two_inodes(src, XFS_MMAPLOCK_SHARED, dest,
				XFS_MMAPLOCK_EXCL);

	/* Check file eligibility and prepare for block sharing. */
	ret = -EINVAL;
	/* Don't reflink realtime inodes */
	if (XFS_IS_REALTIME_INODE(src) || XFS_IS_REALTIME_INODE(dest))
		goto out_unlock;

	/* Don't share DAX file data for now. */
	if (IS_DAX(inode_in) || IS_DAX(inode_out))
		goto out_unlock;

	ret = generic_remap_file_range_prep(file_in, pos_in, file_out, pos_out,
			len, remap_flags);
	if (ret < 0 || *len == 0)
		goto out_unlock;

	/* Attach dquots to dest inode before changing block map */
	ret = xfs_qm_dqattach(dest);
	if (ret)
		goto out_unlock;

	/*
	 * Zero existing post-eof speculative preallocations in the destination
	 * file.
	 */
	ret = xfs_reflink_zero_posteof(dest, pos_out);
	if (ret)
		goto out_unlock;

	/* Set flags and remap blocks. */
	ret = xfs_reflink_set_inode_flag(src, dest);
	if (ret)
		goto out_unlock;

	/*
	 * If pos_out > EOF, we may have dirtied blocks between EOF and
	 * pos_out. In that case, we need to extend the flush and unmap to cover
	 * from EOF to the end of the copy length.
	 */
	if (pos_out > XFS_ISIZE(dest)) {
		loff_t	flen = *len + (pos_out - XFS_ISIZE(dest));

		ret = xfs_flush_unmap_range(dest, XFS_ISIZE(dest), flen);
	} else {
		ret = xfs_flush_unmap_range(dest, pos_out, *len);
	}
	if (ret)
		goto out_unlock;

	return 1;
out_unlock:
	xfs_reflink_remap_unlock(file_in, file_out);
	return ret;
}
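
/*
 * For reference, the EOF block policy described above as seen from
 * userspace.  This is an illustrative sketch only (not part of this
 * file); it assumes a 6000-byte source file on a 4096-byte block
 * filesystem, an empty destination file, and a larger, pre-populated
 * destination file:
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/fs.h>
 *
 *	struct file_clone_range fcr = {
 *		.src_fd		= src_fd,
 *		.src_offset	= 0,
 *		.src_length	= 0,	// 0 means "clone to source EOF"
 *		.dest_offset	= 0,
 *	};
 *
 *	// Partial EOF block becomes the new destination EOF block: allowed.
 *	ioctl(empty_dest_fd, FICLONERANGE, &fcr);
 *
 *	// Partial EOF block lands inside the existing destination EOF:
 *	// rejected with -EINVAL, as described above.
 *	ioctl(large_dest_fd, FICLONERANGE, &fcr);
 */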

/*
 * The user wants to preemptively CoW all shared blocks in this file,
 * which enables us to turn off the reflink flag.  Iterate all
 * extents which are not prealloc/delalloc to see which ranges are
 * mentioned in the refcount tree, then read those blocks into the
 * pagecache, dirty them, fsync them back out, and then we can update
 * the inode flag.  What happens if we run out of memory? :)
 */
STATIC int
xfs_reflink_dirty_extents(
	struct xfs_inode	*ip,
	xfs_fileoff_t		fbno,
	xfs_filblks_t		end,
	xfs_off_t		isize)
{
	struct xfs_mount	*mp = ip->i_mount;
	xfs_agnumber_t		agno;
	xfs_agblock_t		agbno;
	xfs_extlen_t		aglen;
	xfs_agblock_t		rbno;
	xfs_extlen_t		rlen;
	xfs_off_t		fpos;
	xfs_off_t		flen;
	struct xfs_bmbt_irec	map[2];
	int			nmaps;
	int			error = 0;

	while (end - fbno > 0) {
		nmaps = 1;
		/*
		 * Look for extents in the file.  Skip holes, delalloc, or
		 * unwritten extents; they can't be reflinked.
		 */
		error = xfs_bmapi_read(ip, fbno, end - fbno, map, &nmaps, 0);
		if (error)
			goto out;
		if (nmaps == 0)
			break;
		if (!xfs_bmap_is_real_extent(&map[0]))
			goto next;

		map[1] = map[0];
		while (map[1].br_blockcount) {
			agno = XFS_FSB_TO_AGNO(mp, map[1].br_startblock);
			agbno = XFS_FSB_TO_AGBNO(mp, map[1].br_startblock);
			aglen = map[1].br_blockcount;

			error = xfs_reflink_find_shared(mp, NULL, agno, agbno,
					aglen, &rbno, &rlen, true);
			if (error)
				goto out;
			if (rbno == NULLAGBLOCK)
				break;

			/* Dirty the pages */
			xfs_iunlock(ip, XFS_ILOCK_EXCL);
			fpos = XFS_FSB_TO_B(mp, map[1].br_startoff +
					(rbno - agbno));
			flen = XFS_FSB_TO_B(mp, rlen);
			if (fpos + flen > isize)
				flen = isize - fpos;
			error = iomap_file_dirty(VFS_I(ip), fpos, flen,
					&xfs_iomap_ops);
			xfs_ilock(ip, XFS_ILOCK_EXCL);
			if (error)
				goto out;

			map[1].br_blockcount -= (rbno - agbno + rlen);
			map[1].br_startoff += (rbno - agbno + rlen);
			map[1].br_startblock += (rbno - agbno + rlen);
		}

next:
		fbno = map[0].br_startoff + map[0].br_blockcount;
	}
out:
	return error;
}
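
/*
 * A worked pass through the inner loop above, with illustrative numbers:
 * suppose map[1] starts at agbno 100 with br_blockcount == 50, and
 * xfs_reflink_find_shared() reports a shared subrange at rbno == 120 with
 * rlen == 10.  We dirty the file pages backed by AG blocks 120-129, then
 * advance br_startoff and br_startblock by (rbno - agbno + rlen) == 30 and
 * shrink br_blockcount by the same amount, so the next iteration scans the
 * remaining 20 blocks for more shared extents.
 */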

/* Does this inode need the reflink flag? */
int
xfs_reflink_inode_has_shared_extents(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	bool			*has_shared)
{
	struct xfs_bmbt_irec	got;
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_ifork	*ifp;
	xfs_agnumber_t		agno;
	xfs_agblock_t		agbno;
	xfs_extlen_t		aglen;
	xfs_agblock_t		rbno;
	xfs_extlen_t		rlen;
	struct xfs_iext_cursor	icur;
	bool			found;
	int			error;

	ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
		error = xfs_iread_extents(tp, ip, XFS_DATA_FORK);
		if (error)
			return error;
	}

	*has_shared = false;
	found = xfs_iext_lookup_extent(ip, ifp, 0, &icur, &got);
	while (found) {
		if (isnullstartblock(got.br_startblock) ||
		    got.br_state != XFS_EXT_NORM)
			goto next;
		agno = XFS_FSB_TO_AGNO(mp, got.br_startblock);
		agbno = XFS_FSB_TO_AGBNO(mp, got.br_startblock);
		aglen = got.br_blockcount;

		error = xfs_reflink_find_shared(mp, tp, agno, agbno, aglen,
				&rbno, &rlen, false);
		if (error)
			return error;
		/* Is there still a shared block here? */
		if (rbno != NULLAGBLOCK) {
			*has_shared = true;
			return 0;
		}
next:
		found = xfs_iext_next_extent(ifp, &icur, &got);
	}

	return 0;
}
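
/*
 * Note that the scan above only queries the refcount btree for written,
 * real extents: a delalloc extent (isnullstartblock) has no disk blocks
 * yet, and holes and unwritten extents can't be reflinked (see
 * xfs_reflink_dirty_extents() above), so none of them can require the
 * reflink flag.
 */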

/*
 * Clear the inode reflink flag if there are no shared extents.
 *
 * The caller is responsible for joining the inode to the transaction passed in.
 * The inode will be joined to the transaction that is returned to the caller.
 */
int
xfs_reflink_clear_inode_flag(
	struct xfs_inode	*ip,
	struct xfs_trans	**tpp)
{
	bool			needs_flag;
	int			error = 0;

	ASSERT(xfs_is_reflink_inode(ip));

	error = xfs_reflink_inode_has_shared_extents(*tpp, ip, &needs_flag);
	if (error || needs_flag)
		return error;

	/*
	 * We didn't find any shared blocks so turn off the reflink flag.
	 * First, get rid of any leftover CoW mappings.
	 */
	error = xfs_reflink_cancel_cow_blocks(ip, tpp, 0, NULLFILEOFF, true);
	if (error)
		return error;

	/* Clear the inode flag. */
	trace_xfs_reflink_unset_inode_flag(ip);
	ip->i_d.di_flags2 &= ~XFS_DIFLAG2_REFLINK;
	xfs_inode_clear_cowblocks_tag(ip);
	xfs_trans_log_inode(*tpp, ip, XFS_ILOG_CORE);

	return error;
}

/*
 * Clear the inode reflink flag if there are no shared extents and the size
 * hasn't changed.
 */
STATIC int
xfs_reflink_try_clear_inode_flag(
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	int			error = 0;

	/* Start a rolling transaction to remove the mappings */
	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, 0, 0, 0, &tp);
	if (error)
		return error;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, 0);

	error = xfs_reflink_clear_inode_flag(ip, &tp);
	if (error)
		goto cancel;

	error = xfs_trans_commit(tp);
	if (error)
		goto out;

	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return 0;
cancel:
	xfs_trans_cancel(tp);
out:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}
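
/*
 * The function above is a compact instance of the usual XFS transaction
 * lifecycle; a rough sketch of the pattern:
 *
 *	xfs_trans_alloc()	allocate the transaction, reserve log space
 *	xfs_ilock()		take the inode metadata lock
 *	xfs_trans_ijoin()	join the inode to the transaction
 *	...modify and log the inode...
 *	xfs_trans_commit()	or xfs_trans_cancel() on error
 *	xfs_iunlock()		drop the inode metadata lock
 *
 * Passing 0 as the lock flags to xfs_trans_ijoin() tells the transaction
 * not to release the ilock at commit, which is why every exit path here
 * drops it explicitly.
 */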

/*
 * Pre-COW all shared blocks within a given byte range of a file and turn off
 * the reflink flag if we unshare all of the file's blocks.
 */
int
xfs_reflink_unshare(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_off_t		len)
{
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		fbno;
	xfs_filblks_t		end;
	xfs_off_t		isize;
	int			error;

	if (!xfs_is_reflink_inode(ip))
		return 0;

	trace_xfs_reflink_unshare(ip, offset, len);

	inode_dio_wait(VFS_I(ip));

	/* Try to CoW the selected ranges */
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	fbno = XFS_B_TO_FSBT(mp, offset);
	isize = i_size_read(VFS_I(ip));
	end = XFS_B_TO_FSB(mp, offset + len);
	error = xfs_reflink_dirty_extents(ip, fbno, end, isize);
	if (error)
		goto out_unlock;
	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	/* Wait for the IO to finish */
	error = filemap_write_and_wait(VFS_I(ip)->i_mapping);
	if (error)
		goto out;

	/* Turn off the reflink flag if possible. */
	error = xfs_reflink_try_clear_inode_flag(ip);
	if (error)
		goto out;

	return 0;

out_unlock:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
out:
	trace_xfs_reflink_unshare_error(ip, error, _RET_IP_);
	return error;
}
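
/*
 * From userspace this path is reached through fallocate() with the
 * unshare flag -- an illustrative sketch, assuming fd refers to an XFS
 * file whose blocks may be shared:
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <linux/falloc.h>
 *
 *	// Force private copies of any shared blocks in the first 1 MiB so
 *	// that later overwrites of that range need no CoW allocation.
 *	if (fallocate(fd, FALLOC_FL_UNSHARE_RANGE, 0, 1024 * 1024) < 0)
 *		perror("fallocate");
 */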