// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * Copyright (c) 2012 Red Hat, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_mount.h"
#include "xfs_da_format.h"
#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_trans.h"
#include "xfs_extfree_item.h"
#include "xfs_alloc.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_bmap_btree.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_quota.h"
#include "xfs_trans_space.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_log.h"
#include "xfs_rmap_btree.h"
#include "xfs_iomap.h"
#include "xfs_reflink.h"
#include "xfs_refcount.h"

/* Kernel only BMAP related definitions and functions */

/*
 * Convert the given file system block to a disk block. We have to treat it
 * differently based on whether the file is a real time file or not, because the
 * bmap code does.
 */
xfs_daddr_t
xfs_fsb_to_db(struct xfs_inode *ip, xfs_fsblock_t fsb)
{
	return (XFS_IS_REALTIME_INODE(ip) ? \
		 (xfs_daddr_t)XFS_FSB_TO_BB((ip)->i_mount, (fsb)) : \
		 XFS_FSB_TO_DADDR((ip)->i_mount, (fsb)));
}
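
/*
 * Worked example for xfs_fsb_to_db() above (assuming 4096-byte filesystem
 * blocks): a realtime fsb is already a linear block number, so
 * XFS_FSB_TO_BB() only shifts it by sb_blocklog - BBSHIFT = 12 - 9 = 3
 * (fsb 10 -> daddr 80).  A data device fsb also encodes the AG number in
 * its high bits, which is why XFS_FSB_TO_DADDR() must decode the AG number
 * and AG block instead of doing a plain shift.
 */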

/*
 * Routine to zero an extent on disk allocated to the specific inode.
 *
 * The VFS functions take a linearised filesystem block offset, so we have to
 * convert the sparse xfs fsb to the right format first.
 * VFS types are real funky, too.
 */
int
xfs_zero_extent(
	struct xfs_inode *ip,
	xfs_fsblock_t	start_fsb,
	xfs_off_t	count_fsb)
{
	struct xfs_mount *mp = ip->i_mount;
	xfs_daddr_t	sector = xfs_fsb_to_db(ip, start_fsb);
	sector_t	block = XFS_BB_TO_FSBT(mp, sector);

	return blkdev_issue_zeroout(xfs_find_bdev_for_inode(VFS_I(ip)),
		block << (mp->m_super->s_blocksize_bits - 9),
		count_fsb << (mp->m_super->s_blocksize_bits - 9),
		GFP_NOFS, 0);
}
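
/*
 * Unit-conversion sketch for the call above (assuming 4096-byte blocks):
 * s_blocksize_bits is 12, so "x << (12 - 9)" turns filesystem block counts
 * into the 512-byte sector units that blkdev_issue_zeroout() expects for
 * both the start sector and the length.
 */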

#ifdef CONFIG_XFS_RT
int
xfs_bmap_rtalloc(
	struct xfs_bmalloca	*ap)	/* bmap alloc argument struct */
{
	int		error;		/* error return value */
	xfs_mount_t	*mp;		/* mount point structure */
	xfs_extlen_t	prod = 0;	/* product factor for allocators */
	xfs_extlen_t	mod = 0;	/* offset/alignment remainder */
	xfs_extlen_t	ralen = 0;	/* realtime allocation length */
	xfs_extlen_t	align;		/* minimum allocation alignment */
	xfs_rtblock_t	rtb;

	mp = ap->ip->i_mount;
	align = xfs_get_extsz_hint(ap->ip);
	prod = align / mp->m_sb.sb_rextsize;
	error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev,
					align, 1, ap->eof, 0,
					ap->conv, &ap->offset, &ap->length);
	if (error)
		return error;
	ASSERT(ap->length);
	ASSERT(ap->length % mp->m_sb.sb_rextsize == 0);

	/*
	 * If the offset & length are not perfectly aligned
	 * then kill prod, it will just get us in trouble.
	 */
	div_u64_rem(ap->offset, align, &mod);
	if (mod || ap->length % align)
		prod = 1;
	/*
	 * Set ralen to be the actual requested length in rtextents.
	 */
	ralen = ap->length / mp->m_sb.sb_rextsize;
	/*
	 * If the old value was close enough to MAXEXTLEN that
	 * we rounded up to it, cut it back so it's valid again.
	 * Note that if it's a really large request (bigger than
	 * MAXEXTLEN), we don't hear about that number, and can't
	 * adjust the starting point to match it.
	 */
	if (ralen * mp->m_sb.sb_rextsize >= MAXEXTLEN)
		ralen = MAXEXTLEN / mp->m_sb.sb_rextsize;
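	/*
	 * Worked example of the clamp above (assuming sb_rextsize = 16):
	 * MAXEXTLEN is (1 << 21) - 1 = 2097151 blocks, so ralen is limited
	 * to 2097151 / 16 = 131071 realtime extents (2097136 blocks), which
	 * keeps the final length representable in a single extent record.
	 */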

	/*
	 * Lock out modifications to both the RT bitmap and summary inodes
	 */
	xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL|XFS_ILOCK_RTBITMAP);
	xfs_trans_ijoin(ap->tp, mp->m_rbmip, XFS_ILOCK_EXCL);
	xfs_ilock(mp->m_rsumip, XFS_ILOCK_EXCL|XFS_ILOCK_RTSUM);
	xfs_trans_ijoin(ap->tp, mp->m_rsumip, XFS_ILOCK_EXCL);

	/*
	 * If it's an allocation to an empty file at offset 0,
	 * pick an extent that will space things out in the rt area.
	 */
	if (ap->eof && ap->offset == 0) {
		xfs_rtblock_t uninitialized_var(rtx);	/* realtime extent no */

		error = xfs_rtpick_extent(mp, ap->tp, ralen, &rtx);
		if (error)
			return error;
		ap->blkno = rtx * mp->m_sb.sb_rextsize;
	} else {
		ap->blkno = 0;
	}

	xfs_bmap_adjacent(ap);

	/*
	 * Realtime allocation, done through xfs_rtallocate_extent.
	 */
	do_div(ap->blkno, mp->m_sb.sb_rextsize);
	rtb = ap->blkno;
	ap->length = ralen;
	error = xfs_rtallocate_extent(ap->tp, ap->blkno, 1, ap->length,
				&ralen, ap->wasdel, prod, &rtb);
	if (error)
		return error;

	ap->blkno = rtb;
	if (ap->blkno != NULLFSBLOCK) {
		ap->blkno *= mp->m_sb.sb_rextsize;
		ralen *= mp->m_sb.sb_rextsize;
		ap->length = ralen;
		ap->ip->i_d.di_nblocks += ralen;
		xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE);
		if (ap->wasdel)
			ap->ip->i_delayed_blks -= ralen;
		/*
		 * Adjust the disk quota also. This was reserved
		 * earlier.
		 */
		xfs_trans_mod_dquot_byino(ap->tp, ap->ip,
			ap->wasdel ? XFS_TRANS_DQ_DELRTBCOUNT :
					XFS_TRANS_DQ_RTBCOUNT, (long) ralen);

		/* Zero the extent if we were asked to do so */
		if (ap->datatype & XFS_ALLOC_USERDATA_ZERO) {
			error = xfs_zero_extent(ap->ip, ap->blkno, ap->length);
			if (error)
				return error;
		}
	} else {
		ap->length = 0;
	}
	return 0;
}
#endif /* CONFIG_XFS_RT */

/*
 * Check if the endoff is outside the last extent. If so the caller will grow
 * the allocation to a stripe unit boundary.  All offsets are considered outside
 * the end of file for an empty fork, so 1 is returned in *eof in that case.
 */
int
xfs_bmap_eof(
	struct xfs_inode	*ip,
	xfs_fileoff_t		endoff,
	int			whichfork,
	int			*eof)
{
	struct xfs_bmbt_irec	rec;
	int			error;

	error = xfs_bmap_last_extent(NULL, ip, whichfork, &rec, eof);
	if (error || *eof)
		return error;

	*eof = endoff >= rec.br_startoff + rec.br_blockcount;
	return 0;
}

/*
 * Extent tree block counting routines.
 */

/*
 * Count leaf blocks given a range of extent records.  Delayed allocation
 * extents are not counted towards the totals.
 */
xfs_extnum_t
xfs_bmap_count_leaves(
	struct xfs_ifork	*ifp,
	xfs_filblks_t		*count)
{
	struct xfs_iext_cursor	icur;
	struct xfs_bmbt_irec	got;
	xfs_extnum_t		numrecs = 0;

	for_each_xfs_iext(ifp, &icur, &got) {
		if (!isnullstartblock(got.br_startblock)) {
			*count += got.br_blockcount;
			numrecs++;
		}
	}

	return numrecs;
}

/*
 * Count leaf blocks given a range of extent records originally
 * in btree format.
 */
STATIC void
xfs_bmap_disk_count_leaves(
	struct xfs_mount	*mp,
	struct xfs_btree_block	*block,
	int			numrecs,
	xfs_filblks_t		*count)
{
	int		b;
	xfs_bmbt_rec_t	*frp;

	for (b = 1; b <= numrecs; b++) {
		frp = XFS_BMBT_REC_ADDR(mp, block, b);
		*count += xfs_bmbt_disk_get_blockcount(frp);
	}
}

/*
 * Recursively walks each level of a btree
 * to count total fsblocks in use.
 */
STATIC int
xfs_bmap_count_tree(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct xfs_ifork	*ifp,
	xfs_fsblock_t		blockno,
	int			levelin,
	xfs_extnum_t		*nextents,
	xfs_filblks_t		*count)
{
	int			error;
	struct xfs_buf		*bp, *nbp;
	int			level = levelin;
	__be64			*pp;
	xfs_fsblock_t		bno = blockno;
	xfs_fsblock_t		nextbno;
	struct xfs_btree_block	*block, *nextblock;
	int			numrecs;

	error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp, XFS_BMAP_BTREE_REF,
						&xfs_bmbt_buf_ops);
	if (error)
		return error;
	*count += 1;
	block = XFS_BUF_TO_BLOCK(bp);

	if (--level) {
		/* Not at node above leaves, count this level of nodes */
		nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
		while (nextbno != NULLFSBLOCK) {
			error = xfs_btree_read_bufl(mp, tp, nextbno, 0, &nbp,
						XFS_BMAP_BTREE_REF,
						&xfs_bmbt_buf_ops);
			if (error)
				return error;
			*count += 1;
			nextblock = XFS_BUF_TO_BLOCK(nbp);
			nextbno = be64_to_cpu(nextblock->bb_u.l.bb_rightsib);
			xfs_trans_brelse(tp, nbp);
		}

		/* Dive to the next level */
		pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
		bno = be64_to_cpu(*pp);
		error = xfs_bmap_count_tree(mp, tp, ifp, bno, level, nextents,
				count);
		if (error) {
			xfs_trans_brelse(tp, bp);
			XFS_ERROR_REPORT("xfs_bmap_count_tree(1)",
					 XFS_ERRLEVEL_LOW, mp);
			return -EFSCORRUPTED;
		}
		xfs_trans_brelse(tp, bp);
	} else {
		/* count all level 1 nodes and their leaves */
		for (;;) {
			nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
			numrecs = be16_to_cpu(block->bb_numrecs);
			(*nextents) += numrecs;
			xfs_bmap_disk_count_leaves(mp, block, numrecs, count);
			xfs_trans_brelse(tp, bp);
			if (nextbno == NULLFSBLOCK)
				break;
			bno = nextbno;
			error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp,
						XFS_BMAP_BTREE_REF,
						&xfs_bmbt_buf_ops);
			if (error)
				return error;
			*count += 1;
			block = XFS_BUF_TO_BLOCK(bp);
		}
	}
	return 0;
}

/*
 * Count fsblocks of the given fork.  Delayed allocation extents are
 * not counted towards the totals.
 */
int
xfs_bmap_count_blocks(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int			whichfork,
	xfs_extnum_t		*nextents,
	xfs_filblks_t		*count)
{
	struct xfs_mount	*mp;	/* file system mount structure */
	__be64			*pp;	/* pointer to block address */
	struct xfs_btree_block	*block;	/* current btree block */
	struct xfs_ifork	*ifp;	/* fork structure */
	xfs_fsblock_t		bno;	/* block # of "block" */
	int			level;	/* btree level, for checking */
	int			error;

	bno = NULLFSBLOCK;
	mp = ip->i_mount;
	*nextents = 0;
	*count = 0;
	ifp = XFS_IFORK_PTR(ip, whichfork);
	if (!ifp)
		return 0;

	switch (XFS_IFORK_FORMAT(ip, whichfork)) {
	case XFS_DINODE_FMT_EXTENTS:
		*nextents = xfs_bmap_count_leaves(ifp, count);
		return 0;
	case XFS_DINODE_FMT_BTREE:
		if (!(ifp->if_flags & XFS_IFEXTENTS)) {
			error = xfs_iread_extents(tp, ip, whichfork);
			if (error)
				return error;
		}

		/*
		 * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
		 */
		block = ifp->if_broot;
		level = be16_to_cpu(block->bb_level);
		ASSERT(level > 0);
		pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
		bno = be64_to_cpu(*pp);
		ASSERT(bno != NULLFSBLOCK);
		ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount);
		ASSERT(XFS_FSB_TO_AGBNO(mp, bno) < mp->m_sb.sb_agblocks);

		error = xfs_bmap_count_tree(mp, tp, ifp, bno, level,
				nextents, count);
		if (error) {
			XFS_ERROR_REPORT("xfs_bmap_count_blocks(2)",
					XFS_ERRLEVEL_LOW, mp);
			return -EFSCORRUPTED;
		}
		return 0;
	}

	return 0;
}

static int
xfs_getbmap_report_one(
	struct xfs_inode	*ip,
	struct getbmapx		*bmv,
	struct kgetbmap		*out,
	int64_t			bmv_end,
	struct xfs_bmbt_irec	*got)
{
	struct kgetbmap		*p = out + bmv->bmv_entries;
	bool			shared = false, trimmed = false;
	int			error;

	error = xfs_reflink_trim_around_shared(ip, got, &shared, &trimmed);
	if (error)
		return error;

	if (isnullstartblock(got->br_startblock) ||
	    got->br_startblock == DELAYSTARTBLOCK) {
		/*
		 * Delalloc extents that start beyond EOF can occur due to
		 * speculative EOF allocation when the delalloc extent is larger
		 * than the largest freespace extent at conversion time.  These
		 * extents cannot be converted by data writeback, so can exist
		 * here even if we are not supposed to be finding delalloc
		 * extents.
		 */
		if (got->br_startoff < XFS_B_TO_FSB(ip->i_mount, XFS_ISIZE(ip)))
			ASSERT((bmv->bmv_iflags & BMV_IF_DELALLOC) != 0);

		p->bmv_oflags |= BMV_OF_DELALLOC;
		p->bmv_block = -2;
	} else {
		p->bmv_block = xfs_fsb_to_db(ip, got->br_startblock);
	}

	if (got->br_state == XFS_EXT_UNWRITTEN &&
	    (bmv->bmv_iflags & BMV_IF_PREALLOC))
		p->bmv_oflags |= BMV_OF_PREALLOC;

	if (shared)
		p->bmv_oflags |= BMV_OF_SHARED;

	p->bmv_offset = XFS_FSB_TO_BB(ip->i_mount, got->br_startoff);
	p->bmv_length = XFS_FSB_TO_BB(ip->i_mount, got->br_blockcount);

	bmv->bmv_offset = p->bmv_offset + p->bmv_length;
	bmv->bmv_length = max(0LL, bmv_end - bmv->bmv_offset);
	bmv->bmv_entries++;
	return 0;
}

static void
xfs_getbmap_report_hole(
	struct xfs_inode	*ip,
	struct getbmapx		*bmv,
	struct kgetbmap		*out,
	int64_t			bmv_end,
	xfs_fileoff_t		bno,
	xfs_fileoff_t		end)
{
	struct kgetbmap		*p = out + bmv->bmv_entries;

	if (bmv->bmv_iflags & BMV_IF_NO_HOLES)
		return;

	p->bmv_block = -1;
	p->bmv_offset = XFS_FSB_TO_BB(ip->i_mount, bno);
	p->bmv_length = XFS_FSB_TO_BB(ip->i_mount, end - bno);

	bmv->bmv_offset = p->bmv_offset + p->bmv_length;
	bmv->bmv_length = max(0LL, bmv_end - bmv->bmv_offset);
	bmv->bmv_entries++;
}

static inline bool
xfs_getbmap_full(
	struct getbmapx		*bmv)
{
	return bmv->bmv_length == 0 || bmv->bmv_entries >= bmv->bmv_count - 1;
}
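
/*
 * Note on the "- 1" above: the bmv_count supplied by userspace counts the
 * header getbmapx structure as well, so at most bmv_count - 1 extent
 * records can be placed in the out array.
 */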

static bool
xfs_getbmap_next_rec(
	struct xfs_bmbt_irec	*rec,
	xfs_fileoff_t		total_end)
{
	xfs_fileoff_t		end = rec->br_startoff + rec->br_blockcount;

	if (end == total_end)
		return false;

	rec->br_startoff += rec->br_blockcount;
	if (!isnullstartblock(rec->br_startblock) &&
	    rec->br_startblock != DELAYSTARTBLOCK)
		rec->br_startblock += rec->br_blockcount;
	rec->br_blockcount = total_end - end;
	return true;
}
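
/*
 * Illustrative sketch of how the helpers above cooperate: if a 100-block
 * mapping has only its first 40 blocks shared, xfs_getbmap_report_one()
 * trims the record to that shared prefix (via
 * xfs_reflink_trim_around_shared()) and reports it with BMV_OF_SHARED set,
 * then xfs_getbmap_next_rec() advances the record to the remaining 60
 * blocks so they are reported as a separate, unshared entry.
 */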

/*
 * Get the inode's extents as described in bmv, and format them into the out
 * array.  Records are filled until all extents in the requested range have
 * been mapped or until the passed-in bmv->bmv_count slots have been used up.
 */
int						/* error code */
xfs_getbmap(
	struct xfs_inode	*ip,
	struct getbmapx		*bmv,		/* user bmap structure */
	struct kgetbmap		*out)
{
	struct xfs_mount	*mp = ip->i_mount;
	int			iflags = bmv->bmv_iflags;
	int			whichfork, lock, error = 0;
	int64_t			bmv_end, max_len;
	xfs_fileoff_t		bno, first_bno;
	struct xfs_ifork	*ifp;
	struct xfs_bmbt_irec	got, rec;
	xfs_filblks_t		len;
	struct xfs_iext_cursor	icur;

	if (bmv->bmv_iflags & ~BMV_IF_VALID)
		return -EINVAL;
#ifndef DEBUG
	/* Only allow CoW fork queries if we're debugging. */
	if (iflags & BMV_IF_COWFORK)
		return -EINVAL;
#endif
	if ((iflags & BMV_IF_ATTRFORK) && (iflags & BMV_IF_COWFORK))
		return -EINVAL;

	if (bmv->bmv_length < -1)
		return -EINVAL;
	bmv->bmv_entries = 0;
	if (bmv->bmv_length == 0)
		return 0;

	if (iflags & BMV_IF_ATTRFORK)
		whichfork = XFS_ATTR_FORK;
	else if (iflags & BMV_IF_COWFORK)
		whichfork = XFS_COW_FORK;
	else
		whichfork = XFS_DATA_FORK;
	ifp = XFS_IFORK_PTR(ip, whichfork);

	xfs_ilock(ip, XFS_IOLOCK_SHARED);
	switch (whichfork) {
	case XFS_ATTR_FORK:
		if (!XFS_IFORK_Q(ip))
			goto out_unlock_iolock;

		max_len = 1LL << 32;
		lock = xfs_ilock_attr_map_shared(ip);
		break;
	case XFS_COW_FORK:
		/* No CoW fork? Just return */
		if (!ifp)
			goto out_unlock_iolock;

		if (xfs_get_cowextsz_hint(ip))
			max_len = mp->m_super->s_maxbytes;
		else
			max_len = XFS_ISIZE(ip);

		lock = XFS_ILOCK_SHARED;
		xfs_ilock(ip, lock);
		break;
	case XFS_DATA_FORK:
		if (!(iflags & BMV_IF_DELALLOC) &&
		    (ip->i_delayed_blks || XFS_ISIZE(ip) > ip->i_d.di_size)) {
			error = filemap_write_and_wait(VFS_I(ip)->i_mapping);
			if (error)
				goto out_unlock_iolock;

			/*
			 * Even after flushing the inode, there can still be
			 * delalloc blocks on the inode beyond EOF due to
			 * speculative preallocation.  These are not removed
			 * until the release function is called or the inode
			 * is inactivated.  Hence we cannot assert here that
			 * ip->i_delayed_blks == 0.
			 */
		}

		if (xfs_get_extsz_hint(ip) ||
		    (ip->i_d.di_flags &
		     (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND)))
			max_len = mp->m_super->s_maxbytes;
		else
			max_len = XFS_ISIZE(ip);

		lock = xfs_ilock_data_map_shared(ip);
		break;
	}

	switch (XFS_IFORK_FORMAT(ip, whichfork)) {
	case XFS_DINODE_FMT_EXTENTS:
	case XFS_DINODE_FMT_BTREE:
		break;
	case XFS_DINODE_FMT_LOCAL:
		/* Local format inode forks report no extents. */
		goto out_unlock_ilock;
	default:
		error = -EINVAL;
		goto out_unlock_ilock;
	}

	if (bmv->bmv_length == -1) {
		max_len = XFS_FSB_TO_BB(mp, XFS_B_TO_FSB(mp, max_len));
		bmv->bmv_length = max(0LL, max_len - bmv->bmv_offset);
	}

	bmv_end = bmv->bmv_offset + bmv->bmv_length;

	first_bno = bno = XFS_BB_TO_FSBT(mp, bmv->bmv_offset);
	len = XFS_BB_TO_FSB(mp, bmv->bmv_length);

	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
		error = xfs_iread_extents(NULL, ip, whichfork);
		if (error)
			goto out_unlock_ilock;
	}

	if (!xfs_iext_lookup_extent(ip, ifp, bno, &icur, &got)) {
		/*
		 * Report a whole-file hole if the delalloc flag is set to
		 * stay compatible with the old implementation.
		 */
		if (iflags & BMV_IF_DELALLOC)
			xfs_getbmap_report_hole(ip, bmv, out, bmv_end, bno,
					XFS_B_TO_FSB(mp, XFS_ISIZE(ip)));
		goto out_unlock_ilock;
	}

	while (!xfs_getbmap_full(bmv)) {
		xfs_trim_extent(&got, first_bno, len);

		/*
		 * Report an entry for a hole if this extent doesn't directly
		 * follow the previous one.
		 */
		if (got.br_startoff > bno) {
			xfs_getbmap_report_hole(ip, bmv, out, bmv_end, bno,
					got.br_startoff);
			if (xfs_getbmap_full(bmv))
				break;
		}

		/*
		 * In order to report shared extents accurately, we report each
		 * distinct shared / unshared part of a single bmbt record with
		 * an individual getbmapx record.
		 */
		bno = got.br_startoff + got.br_blockcount;
		rec = got;
		do {
			error = xfs_getbmap_report_one(ip, bmv, out, bmv_end,
					&rec);
			if (error || xfs_getbmap_full(bmv))
				goto out_unlock_ilock;
		} while (xfs_getbmap_next_rec(&rec, bno));

		if (!xfs_iext_next_extent(ifp, &icur, &got)) {
			xfs_fileoff_t	end = XFS_B_TO_FSB(mp, XFS_ISIZE(ip));

			out[bmv->bmv_entries - 1].bmv_oflags |= BMV_OF_LAST;

			if (whichfork != XFS_ATTR_FORK && bno < end &&
			    !xfs_getbmap_full(bmv)) {
				xfs_getbmap_report_hole(ip, bmv, out, bmv_end,
						bno, end);
			}
			break;
		}

		if (bno >= first_bno + len)
			break;
	}

out_unlock_ilock:
	xfs_iunlock(ip, lock);
out_unlock_iolock:
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
	return error;
}

/*
 * Dead simple method of punching delayed allocation blocks from a range in
 * the inode.  This will always punch out both the start and end blocks, even
 * if the ranges only partially overlap them, so it is up to the caller to
 * ensure that partial blocks are not passed in.
 */
int
xfs_bmap_punch_delalloc_range(
	struct xfs_inode	*ip,
	xfs_fileoff_t		start_fsb,
	xfs_fileoff_t		length)
{
	struct xfs_ifork	*ifp = &ip->i_df;
	xfs_fileoff_t		end_fsb = start_fsb + length;
	struct xfs_bmbt_irec	got, del;
	struct xfs_iext_cursor	icur;
	int			error = 0;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
		error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK);
		if (error)
			goto out_unlock;
	}

	if (!xfs_iext_lookup_extent_before(ip, ifp, &end_fsb, &icur, &got))
		goto out_unlock;

	while (got.br_startoff + got.br_blockcount > start_fsb) {
		del = got;
		xfs_trim_extent(&del, start_fsb, length);

		/*
		 * A delete can push the cursor forward. Step back to the
		 * previous extent on non-delalloc or extents outside the
		 * target range.
		 */
		if (!del.br_blockcount ||
		    !isnullstartblock(del.br_startblock)) {
			if (!xfs_iext_prev_extent(ifp, &icur, &got))
				break;
			continue;
		}

		error = xfs_bmap_del_extent_delay(ip, XFS_DATA_FORK, &icur,
						  &got, &del);
		if (error || !xfs_iext_get_extent(ifp, &icur, &got))
			break;
	}

out_unlock:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}

/*
 * Test whether it is appropriate to check an inode for and free post EOF
 * blocks. The 'force' parameter determines whether we should also consider
 * regular files that are marked preallocated or append-only.
 */
bool
xfs_can_free_eofblocks(struct xfs_inode *ip, bool force)
{
	/* prealloc/delalloc exists only on regular files */
	if (!S_ISREG(VFS_I(ip)->i_mode))
		return false;

	/*
	 * Zero sized files with no cached pages and delalloc blocks will not
	 * have speculative prealloc/delalloc blocks to remove.
	 */
	if (VFS_I(ip)->i_size == 0 &&
	    VFS_I(ip)->i_mapping->nrpages == 0 &&
	    ip->i_delayed_blks == 0)
		return false;

	/* If we haven't read in the extent list, then don't do it now. */
	if (!(ip->i_df.if_flags & XFS_IFEXTENTS))
		return false;

	/*
	 * Do not free real preallocated or append-only files unless the file
	 * has delalloc blocks and we are forced to remove them.
	 */
	if (ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND))
		if (!force || ip->i_delayed_blks == 0)
			return false;

	return true;
}
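
/*
 * Sketch of the typical pairing with xfs_free_eofblocks() below, as used by
 * callers such as the final-release path (illustrative only):
 *
 *	if (xfs_can_free_eofblocks(ip, false))
 *		error = xfs_free_eofblocks(ip);
 */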

/*
 * This is called to free any blocks beyond eof. The caller must hold
 * IOLOCK_EXCL unless we are in the inode reclaim path and have the only
 * reference to the inode.
 */
int
xfs_free_eofblocks(
	struct xfs_inode	*ip)
{
	struct xfs_trans	*tp;
	int			error;
	xfs_fileoff_t		end_fsb;
	xfs_fileoff_t		last_fsb;
	xfs_filblks_t		map_len;
	int			nimaps;
	struct xfs_bmbt_irec	imap;
	struct xfs_mount	*mp = ip->i_mount;

	/*
	 * Figure out if there are any blocks beyond the end
	 * of the file.  If not, then there is nothing to do.
	 */
	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_ISIZE(ip));
	last_fsb = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
	if (last_fsb <= end_fsb)
		return 0;
	map_len = last_fsb - end_fsb;

	nimaps = 1;
	xfs_ilock(ip, XFS_ILOCK_SHARED);
	error = xfs_bmapi_read(ip, end_fsb, map_len, &imap, &nimaps, 0);
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	/*
	 * If there are blocks after the end of file, truncate the file to its
	 * current size to free them up.
	 */
	if (!error && (nimaps != 0) &&
	    (imap.br_startblock != HOLESTARTBLOCK ||
	     ip->i_delayed_blks)) {
		/*
		 * Attach the dquots to the inode up front.
		 */
		error = xfs_qm_dqattach(ip);
		if (error)
			return error;

		/* wait on dio to ensure i_size has settled */
		inode_dio_wait(VFS_I(ip));

		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0,
				&tp);
		if (error) {
			ASSERT(XFS_FORCED_SHUTDOWN(mp));
			return error;
		}

		xfs_ilock(ip, XFS_ILOCK_EXCL);
		xfs_trans_ijoin(tp, ip, 0);

		/*
		 * Do not update the on-disk file size.  If we update the
		 * on-disk file size and then the system crashes before the
		 * contents of the file are flushed to disk then the files
		 * may be full of holes (ie NULL files bug).
		 */
		error = xfs_itruncate_extents_flags(&tp, ip, XFS_DATA_FORK,
				XFS_ISIZE(ip), XFS_BMAPI_NODISCARD);
		if (error) {
			/*
			 * If we get an error at this point we simply don't
			 * bother truncating the file.
			 */
			xfs_trans_cancel(tp);
		} else {
			error = xfs_trans_commit(tp);
			if (!error)
				xfs_inode_clear_eofblocks_tag(ip);
		}

		xfs_iunlock(ip, XFS_ILOCK_EXCL);
	}
	return error;
}

int
xfs_alloc_file_space(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_off_t		len,
	int			alloc_type)
{
	xfs_mount_t		*mp = ip->i_mount;
	xfs_off_t		count;
	xfs_filblks_t		allocated_fsb;
	xfs_filblks_t		allocatesize_fsb;
	xfs_extlen_t		extsz, temp;
	xfs_fileoff_t		startoffset_fsb;
	int			nimaps;
	int			quota_flag;
	int			rt;
	xfs_trans_t		*tp;
	xfs_bmbt_irec_t		imaps[1], *imapp;
	struct xfs_defer_ops	dfops;
	uint			qblocks, resblks, resrtextents;
	int			error;

	trace_xfs_alloc_file_space(ip);

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	error = xfs_qm_dqattach(ip);
	if (error)
		return error;

	if (len <= 0)
		return -EINVAL;

	rt = XFS_IS_REALTIME_INODE(ip);
	extsz = xfs_get_extsz_hint(ip);

	count = len;
	imapp = &imaps[0];
	nimaps = 1;
	startoffset_fsb	= XFS_B_TO_FSBT(mp, offset);
	allocatesize_fsb = XFS_B_TO_FSB(mp, count);

	/*
	 * Allocate file space until done or until there is an error
	 */
	while (allocatesize_fsb && !error) {
		xfs_fileoff_t	s, e;

		/*
		 * Determine space reservations for data/realtime.
		 */
		if (unlikely(extsz)) {
			s = startoffset_fsb;
			do_div(s, extsz);
			s *= extsz;
			e = startoffset_fsb + allocatesize_fsb;
			div_u64_rem(startoffset_fsb, extsz, &temp);
			if (temp)
				e += temp;
			div_u64_rem(e, extsz, &temp);
			if (temp)
				e += extsz - temp;
		} else {
			s = 0;
			e = allocatesize_fsb;
		}
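		/*
		 * Worked example of the alignment above (assuming extsz = 8
		 * blocks): startoffset_fsb = 10 and allocatesize_fsb = 20
		 * give s = 8 and e = 32, so the reservation below covers the
		 * fully aligned range the allocator may actually use.
		 */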

		/*
		 * The transaction reservation is limited to a 32-bit block
		 * count, hence we need to limit the number of blocks we are
		 * trying to reserve to avoid an overflow. We can't allocate
		 * more than @nimaps extents, and an extent is limited on disk
		 * to MAXEXTLEN (21 bits), so use that to enforce the limit.
		 */
		resblks = min_t(xfs_fileoff_t, (e - s), (MAXEXTLEN * nimaps));
		if (unlikely(rt)) {
			resrtextents = qblocks = resblks;
			resrtextents /= mp->m_sb.sb_rextsize;
			resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
			quota_flag = XFS_QMOPT_RES_RTBLKS;
		} else {
			resrtextents = 0;
			resblks = qblocks = XFS_DIOSTRAT_SPACE_RES(mp, resblks);
			quota_flag = XFS_QMOPT_RES_REGBLKS;
		}

		/*
		 * Allocate and setup the transaction.
		 */
		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks,
				resrtextents, 0, &tp);

		/*
		 * Check for running out of space
		 */
		if (error) {
			/*
			 * Free the transaction structure.
			 */
			ASSERT(error == -ENOSPC || XFS_FORCED_SHUTDOWN(mp));
			break;
		}
		xfs_ilock(ip, XFS_ILOCK_EXCL);
		error = xfs_trans_reserve_quota_nblks(tp, ip, qblocks,
						      0, quota_flag);
		if (error)
			goto error1;

		xfs_trans_ijoin(tp, ip, 0);

		xfs_defer_init(tp, &dfops, &tp->t_firstblock);
		error = xfs_bmapi_write(tp, ip, startoffset_fsb,
					allocatesize_fsb, alloc_type, resblks,
					imapp, &nimaps);
		if (error)
			goto error0;

		/*
		 * Complete the transaction
		 */
		error = xfs_defer_finish(&tp, tp->t_dfops);
		if (error)
			goto error0;

		error = xfs_trans_commit(tp);
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		if (error)
			break;

		allocated_fsb = imapp->br_blockcount;

		if (nimaps == 0) {
			error = -ENOSPC;
			break;
		}

		startoffset_fsb += allocated_fsb;
		allocatesize_fsb -= allocated_fsb;
	}

	return error;

error0:	/* Cancel bmap, unlock inode, unreserve quota blocks, cancel trans */
	xfs_defer_cancel(&dfops);
	xfs_trans_unreserve_quota_nblks(tp, ip, (long)qblocks, 0, quota_flag);

error1:	/* Just cancel transaction */
	xfs_trans_cancel(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}

static int
xfs_unmap_extent(
	struct xfs_inode	*ip,
	xfs_fileoff_t		startoffset_fsb,
	xfs_filblks_t		len_fsb,
	int			*done)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	struct xfs_defer_ops	dfops;
	uint			resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
	int			error;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, 0, &tp);
	if (error) {
		ASSERT(error == -ENOSPC || XFS_FORCED_SHUTDOWN(mp));
		return error;
	}

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	error = xfs_trans_reserve_quota(tp, mp, ip->i_udquot, ip->i_gdquot,
			ip->i_pdquot, resblks, 0, XFS_QMOPT_RES_REGBLKS);
	if (error)
		goto out_trans_cancel;

	xfs_trans_ijoin(tp, ip, 0);

	xfs_defer_init(tp, &dfops, &tp->t_firstblock);
	error = xfs_bunmapi(tp, ip, startoffset_fsb, len_fsb, 0, 2, done);
	if (error)
		goto out_bmap_cancel;

	xfs_defer_ijoin(tp->t_dfops, ip);
	error = xfs_defer_finish(&tp, tp->t_dfops);
	if (error)
		goto out_bmap_cancel;

	error = xfs_trans_commit(tp);
out_unlock:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;

out_bmap_cancel:
	xfs_defer_cancel(tp->t_dfops);
out_trans_cancel:
	xfs_trans_cancel(tp);
	goto out_unlock;
}

static int
xfs_adjust_extent_unmap_boundaries(
	struct xfs_inode	*ip,
	xfs_fileoff_t		*startoffset_fsb,
	xfs_fileoff_t		*endoffset_fsb)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_bmbt_irec	imap;
	int			nimap, error;
	xfs_extlen_t		mod = 0;

	nimap = 1;
	error = xfs_bmapi_read(ip, *startoffset_fsb, 1, &imap, &nimap, 0);
	if (error)
		return error;

	if (nimap && imap.br_startblock != HOLESTARTBLOCK) {
		ASSERT(imap.br_startblock != DELAYSTARTBLOCK);
		div_u64_rem(imap.br_startblock, mp->m_sb.sb_rextsize, &mod);
		if (mod)
			*startoffset_fsb += mp->m_sb.sb_rextsize - mod;
	}

	nimap = 1;
	error = xfs_bmapi_read(ip, *endoffset_fsb - 1, 1, &imap, &nimap, 0);
	if (error)
		return error;

	if (nimap && imap.br_startblock != HOLESTARTBLOCK) {
		ASSERT(imap.br_startblock != DELAYSTARTBLOCK);
		mod++;
		if (mod && mod != mp->m_sb.sb_rextsize)
			*endoffset_fsb -= mod;
	}

	return 0;
}

static int
xfs_flush_unmap_range(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_off_t		len)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct inode		*inode = VFS_I(ip);
	xfs_off_t		rounding, start, end;
	int			error;

	/* wait for the completion of any pending DIOs */
	inode_dio_wait(inode);

	rounding = max_t(xfs_off_t, 1 << mp->m_sb.sb_blocklog, PAGE_SIZE);
	start = round_down(offset, rounding);
	end = round_up(offset + len, rounding) - 1;

	error = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (error)
		return error;
	truncate_pagecache_range(inode, start, end);
	return 0;
}
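
/*
 * Example of the rounding in xfs_flush_unmap_range() above (assuming
 * 4096-byte blocks and pages): offset = 1000, len = 3000 gives
 * rounding = 4096, start = 0 and end = 4095, so the whole first page is
 * written back and dropped from the page cache before the blocks
 * underneath are unmapped.
 */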

int
xfs_free_file_space(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_off_t		len)
{
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		startoffset_fsb;
	xfs_fileoff_t		endoffset_fsb;
	int			done = 0, error;

	trace_xfs_free_file_space(ip);

	error = xfs_qm_dqattach(ip);
	if (error)
		return error;

	if (len <= 0)	/* if nothing being freed */
		return 0;

	error = xfs_flush_unmap_range(ip, offset, len);
	if (error)
		return error;

	startoffset_fsb = XFS_B_TO_FSB(mp, offset);
	endoffset_fsb = XFS_B_TO_FSBT(mp, offset + len);

	/*
	 * Need to zero the stuff we're not freeing, on disk.  If it's a RT file
	 * and we can't use unwritten extents then we actually need to ensure
	 * to zero the whole extent, otherwise we just need to take care of the
	 * block boundaries, and xfs_bunmapi will handle the rest.
	 */
	if (XFS_IS_REALTIME_INODE(ip) &&
	    !xfs_sb_version_hasextflgbit(&mp->m_sb)) {
		error = xfs_adjust_extent_unmap_boundaries(ip, &startoffset_fsb,
				&endoffset_fsb);
		if (error)
			return error;
	}

	if (endoffset_fsb > startoffset_fsb) {
		while (!done) {
			error = xfs_unmap_extent(ip, startoffset_fsb,
					endoffset_fsb - startoffset_fsb, &done);
			if (error)
				return error;
		}
	}

	/*
	 * Now that we've unmapped all full blocks we'll have to zero out any
	 * partial block at the beginning and/or end.  iomap_zero_range is smart
	 * enough to skip any holes, including those we just created, but we
	 * must take care not to zero beyond EOF and enlarge i_size.
	 */
	if (offset >= XFS_ISIZE(ip))
		return 0;
	if (offset + len > XFS_ISIZE(ip))
		len = XFS_ISIZE(ip) - offset;
	error = iomap_zero_range(VFS_I(ip), offset, len, NULL, &xfs_iomap_ops);
	if (error)
		return error;

	/*
	 * If we zeroed right up to EOF and EOF straddles a page boundary we
	 * must make sure that the post-EOF area is also zeroed because the
	 * page could be mmap'd and iomap_zero_range doesn't do that for us.
	 * Writeback of the eof page will do this, albeit clumsily.
	 */
	if (offset + len >= XFS_ISIZE(ip) && ((offset + len) & PAGE_MASK)) {
		error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping,
				(offset + len) & ~PAGE_MASK, LLONG_MAX);
	}

	return error;
}
1205
Brian Foster5d11fb42014-10-30 10:35:11 +11001206/*
1207 * Preallocate and zero a range of a file. This mechanism has the allocation
1208 * semantics of fallocate and in addition converts data in the range to zeroes.
1209 */
Christoph Hellwig865e9442013-10-12 00:55:08 -07001210int
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001211xfs_zero_file_space(
1212 struct xfs_inode *ip,
1213 xfs_off_t offset,
Christoph Hellwig5f8aca82013-10-12 00:55:06 -07001214 xfs_off_t len)
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001215{
1216 struct xfs_mount *mp = ip->i_mount;
Brian Foster5d11fb42014-10-30 10:35:11 +11001217 uint blksize;
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001218 int error;
1219
Dave Chinner897b73b2014-04-14 18:15:11 +10001220 trace_xfs_zero_file_space(ip);
1221
Brian Foster5d11fb42014-10-30 10:35:11 +11001222 blksize = 1 << mp->m_sb.sb_blocklog;
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001223
1224 /*
Brian Foster5d11fb42014-10-30 10:35:11 +11001225 * Punch a hole and prealloc the range. We use hole punch rather than
1226 * unwritten extent conversion for two reasons:
1227 *
1228 * 1.) Hole punch handles partial block zeroing for us.
1229 *
1230 * 2.) If prealloc returns ENOSPC, the file range is still zero-valued
1231 * by virtue of the hole punch.
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001232 */
Brian Foster5d11fb42014-10-30 10:35:11 +11001233 error = xfs_free_file_space(ip, offset, len);
1234 if (error)
1235 goto out;
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001236
Brian Foster5d11fb42014-10-30 10:35:11 +11001237 error = xfs_alloc_file_space(ip, round_down(offset, blksize),
1238 round_up(offset + len, blksize) -
1239 round_down(offset, blksize),
1240 XFS_BMAPI_PREALLOC);
Christoph Hellwig5f8aca82013-10-12 00:55:06 -07001241out:
Dave Chinnerc24b5df2013-08-12 20:49:45 +10001242 return error;
1243
1244}
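
/*
 * Worked example for xfs_zero_file_space() above (assuming 4096-byte
 * blocks): offset = 1000, len = 3000 first punches and zeroes bytes
 * [1000, 4000), then preallocates the covering block range [0, 4096) so
 * the zeroed range stays allocated rather than remaining a hole.
 */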

static int
xfs_prepare_shift(
	struct xfs_inode	*ip,
	loff_t			offset)
{
	int			error;

	/*
	 * Trim eofblocks to avoid shifting uninitialized post-eof preallocation
	 * into the accessible region of the file.
	 */
	if (xfs_can_free_eofblocks(ip, true)) {
		error = xfs_free_eofblocks(ip);
		if (error)
			return error;
	}

	/*
	 * Writeback and invalidate cache for the remainder of the file as we're
	 * about to shift down every extent from offset to EOF.
	 */
	error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping, offset, -1);
	if (error)
		return error;
	error = invalidate_inode_pages2_range(VFS_I(ip)->i_mapping,
					offset >> PAGE_SHIFT, -1);
	if (error)
		return error;

	/*
	 * Clean out anything hanging around in the cow fork now that
	 * we've flushed all the dirty data out to disk to avoid having
	 * CoW extents at the wrong offsets.
	 */
	if (xfs_is_reflink_inode(ip)) {
		error = xfs_reflink_cancel_cow_range(ip, offset, NULLFILEOFF,
				true);
		if (error)
			return error;
	}

	return 0;
}

/*
 * xfs_collapse_file_space()
 *	This routine frees disk space and shifts extents for the given file.
 *	The first thing we do is free the data blocks in the specified range
 *	by calling xfs_free_file_space(), which also syncs dirty data and
 *	invalidates the page cache over the region being collapsed.  Extent
 *	records are then shifted to the left to cover the resulting hole.
 * RETURNS:
 *	0 on success
 *	errno on error
 */
int
xfs_collapse_file_space(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_off_t		len)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	int			error;
	struct xfs_defer_ops	dfops;
	xfs_fileoff_t		next_fsb = XFS_B_TO_FSB(mp, offset + len);
	xfs_fileoff_t		shift_fsb = XFS_B_TO_FSB(mp, len);
	uint			resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
	bool			done = false;

	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
	ASSERT(xfs_isilocked(ip, XFS_MMAPLOCK_EXCL));

	trace_xfs_collapse_file_space(ip);

	error = xfs_free_file_space(ip, offset, len);
	if (error)
		return error;

	error = xfs_prepare_shift(ip, offset);
	if (error)
		return error;

	while (!error && !done) {
		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, 0,
					&tp);
		if (error)
			break;

		xfs_ilock(ip, XFS_ILOCK_EXCL);
		error = xfs_trans_reserve_quota(tp, mp, ip->i_udquot,
				ip->i_gdquot, ip->i_pdquot, resblks, 0,
				XFS_QMOPT_RES_REGBLKS);
		if (error)
			goto out_trans_cancel;
		xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);

		xfs_defer_init(tp, &dfops, &tp->t_firstblock);
		error = xfs_bmap_collapse_extents(tp, ip, &next_fsb, shift_fsb,
				&done, &tp->t_firstblock);
		if (error)
			goto out_bmap_cancel;

		error = xfs_defer_finish(&tp, tp->t_dfops);
		if (error)
			goto out_bmap_cancel;
		error = xfs_trans_commit(tp);
	}

	return error;

out_bmap_cancel:
	xfs_defer_cancel(tp->t_dfops);
out_trans_cancel:
	xfs_trans_cancel(tp);
	return error;
}

/*
 * xfs_insert_file_space()
 *	This routine creates hole space by shifting extents for the given file.
 *	The first thing we do is sync dirty data and invalidate the page cache
 *	over the region on which the insert range is working.  We then split
 *	the extent at the given offset into two extents by calling
 *	xfs_bmap_split_extent, and shift all extent records lying between
 *	[offset, last allocated extent] to the right to make room for the hole.
 * RETURNS:
 *	0 on success
 *	errno on error
 */
1377int
1378xfs_insert_file_space(
1379 struct xfs_inode *ip,
1380 loff_t offset,
1381 loff_t len)
1382{
Christoph Hellwig4ed36c62017-10-19 11:07:10 -07001383 struct xfs_mount *mp = ip->i_mount;
1384 struct xfs_trans *tp;
1385 int error;
1386 struct xfs_defer_ops dfops;
Christoph Hellwig4ed36c62017-10-19 11:07:10 -07001387 xfs_fileoff_t stop_fsb = XFS_B_TO_FSB(mp, offset);
1388 xfs_fileoff_t next_fsb = NULLFSBLOCK;
1389 xfs_fileoff_t shift_fsb = XFS_B_TO_FSB(mp, len);
Christoph Hellwigecfea3f2017-10-19 11:07:11 -07001390 bool done = false;
Christoph Hellwig4ed36c62017-10-19 11:07:10 -07001391
Namjae Jeona904b1c2015-03-25 15:08:56 +11001392 ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
Christoph Hellwig9ad1a23a2017-10-23 16:32:38 -07001393 ASSERT(xfs_isilocked(ip, XFS_MMAPLOCK_EXCL));
1394
Namjae Jeona904b1c2015-03-25 15:08:56 +11001395 trace_xfs_insert_file_space(ip);
1396
Darrick J. Wongf62cb482018-06-21 23:26:57 -07001397 error = xfs_bmap_can_insert_extents(ip, stop_fsb, shift_fsb);
1398 if (error)
1399 return error;
1400
Christoph Hellwig4ed36c62017-10-19 11:07:10 -07001401 error = xfs_prepare_shift(ip, offset);
1402 if (error)
1403 return error;
1404
1405 /*
1406 * The extent shifting code works on extent granularity. So, if stop_fsb
 1407	 * is not the starting block of an extent, we need to split the extent at
1408 * stop_fsb.
1409 */
1410 error = xfs_bmap_split_extent(ip, stop_fsb);
1411 if (error)
1412 return error;
1413
1414 while (!error && !done) {
1415 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, 0, 0, 0,
1416 &tp);
1417 if (error)
1418 break;
1419
1420 xfs_ilock(ip, XFS_ILOCK_EXCL);
1421 xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
Brian Fosterd0a9d792018-07-11 22:26:24 -07001422 xfs_defer_init(tp, &dfops, &tp->t_firstblock);
Christoph Hellwigecfea3f2017-10-19 11:07:11 -07001423 error = xfs_bmap_insert_extents(tp, ip, &next_fsb, shift_fsb,
Brian Fosterd0a9d792018-07-11 22:26:24 -07001424 &done, stop_fsb, &tp->t_firstblock);
Christoph Hellwig4ed36c62017-10-19 11:07:10 -07001425 if (error)
1426 goto out_bmap_cancel;
1427
Brian Fosterf4a9cf972018-07-11 22:26:15 -07001428 error = xfs_defer_finish(&tp, tp->t_dfops);
Christoph Hellwig4ed36c62017-10-19 11:07:10 -07001429 if (error)
1430 goto out_bmap_cancel;
1431 error = xfs_trans_commit(tp);
1432 }
1433
1434 return error;
1435
1436out_bmap_cancel:
Brian Fosterf4a9cf972018-07-11 22:26:15 -07001437 xfs_defer_cancel(tp->t_dfops);
Christoph Hellwig4ed36c62017-10-19 11:07:10 -07001438 xfs_trans_cancel(tp);
1439 return error;
Namjae Jeona904b1c2015-03-25 15:08:56 +11001440}
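/*
 * Illustrative sketch only, not part of this file: the collapse and insert
 * paths above are normally reached from userspace via fallocate(2) with
 * FALLOC_FL_COLLAPSE_RANGE or FALLOC_FL_INSERT_RANGE. The program below is
 * a hypothetical example; it assumes a file named "testfile" exists on an
 * XFS filesystem, that it is comfortably larger than 5MB, that offset and
 * length are multiples of the filesystem block size (otherwise the kernel
 * rejects the call with EINVAL before these functions run), and that glibc
 * exposes fallocate() and the FALLOC_FL_* flags through <fcntl.h> when
 * _GNU_SOURCE is defined.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	int	fd = open("testfile", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Remove 1MB at offset 4MB; everything beyond shifts left. */
	if (fallocate(fd, FALLOC_FL_COLLAPSE_RANGE, 4 << 20, 1 << 20) < 0)
		perror("FALLOC_FL_COLLAPSE_RANGE");

	/* Open a 1MB hole at offset 4MB; existing data shifts right. */
	if (fallocate(fd, FALLOC_FL_INSERT_RANGE, 4 << 20, 1 << 20) < 0)
		perror("FALLOC_FL_INSERT_RANGE");

	close(fd);
	return 0;
}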
1441
1442/*
Dave Chinnera133d952013-08-12 20:49:48 +10001443 * We need to check that the format of the data fork in the temporary inode is
1444 * valid for the target inode before doing the swap. This is not a problem with
1445 * attr1 because of the fixed fork offset, but attr2 has a dynamically sized
 1446 * data fork depending on the space the attribute fork is taking, so we can get
1447 * invalid formats on the target inode.
1448 *
1449 * E.g. target has space for 7 extents in extent format, temp inode only has
1450 * space for 6. If we defragment down to 7 extents, then the tmp format is a
1451 * btree, but when swapped it needs to be in extent format. Hence we can't just
1452 * blindly swap data forks on attr2 filesystems.
1453 *
1454 * Note that we check the swap in both directions so that we don't end up with
1455 * a corrupt temporary inode, either.
1456 *
1457 * Note that fixing the way xfs_fsr sets up the attribute fork in the source
1458 * inode will prevent this situation from occurring, so all we do here is
 1459 * reject and log the attempt. Basically we are putting the responsibility on
1460 * userspace to get this right.
1461 */
1462static int
1463xfs_swap_extents_check_format(
Darrick J. Wonge06259a2016-10-03 09:11:52 -07001464 struct xfs_inode *ip, /* target inode */
1465 struct xfs_inode *tip) /* tmp inode */
Dave Chinnera133d952013-08-12 20:49:48 +10001466{
1467
1468 /* Should never get a local format */
1469 if (ip->i_d.di_format == XFS_DINODE_FMT_LOCAL ||
1470 tip->i_d.di_format == XFS_DINODE_FMT_LOCAL)
Dave Chinner24513372014-06-25 14:58:08 +10001471 return -EINVAL;
Dave Chinnera133d952013-08-12 20:49:48 +10001472
1473 /*
 1474	 * If the target inode has fewer extents than the temporary inode, then
1475 * why did userspace call us?
1476 */
1477 if (ip->i_d.di_nextents < tip->i_d.di_nextents)
Dave Chinner24513372014-06-25 14:58:08 +10001478 return -EINVAL;
Dave Chinnera133d952013-08-12 20:49:48 +10001479
1480 /*
Darrick J. Wong1f08af52016-10-03 09:11:53 -07001481 * If we have to use the (expensive) rmap swap method, we can
1482 * handle any number of extents and any format.
1483 */
1484 if (xfs_sb_version_hasrmapbt(&ip->i_mount->m_sb))
1485 return 0;
1486
1487 /*
Dave Chinnera133d952013-08-12 20:49:48 +10001488	 * If the target inode is in extent form and the temp inode is in btree
 1489	 * form, then we will end up with the target inode in the wrong format
 1490	 * as we already know there are fewer extents in the temp inode.
1491 */
1492 if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
1493 tip->i_d.di_format == XFS_DINODE_FMT_BTREE)
Dave Chinner24513372014-06-25 14:58:08 +10001494 return -EINVAL;
Dave Chinnera133d952013-08-12 20:49:48 +10001495
1496 /* Check temp in extent form to max in target */
1497 if (tip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
1498 XFS_IFORK_NEXTENTS(tip, XFS_DATA_FORK) >
1499 XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK))
Dave Chinner24513372014-06-25 14:58:08 +10001500 return -EINVAL;
Dave Chinnera133d952013-08-12 20:49:48 +10001501
1502 /* Check target in extent form to max in temp */
1503 if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
1504 XFS_IFORK_NEXTENTS(ip, XFS_DATA_FORK) >
1505 XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK))
Dave Chinner24513372014-06-25 14:58:08 +10001506 return -EINVAL;
Dave Chinnera133d952013-08-12 20:49:48 +10001507
1508 /*
1509 * If we are in a btree format, check that the temp root block will fit
1510 * in the target and that it has enough extents to be in btree format
1511 * in the target.
1512 *
1513 * Note that we have to be careful to allow btree->extent conversions
1514 * (a common defrag case) which will occur when the temp inode is in
1515 * extent format...
1516 */
1517 if (tip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
Arnd Bergmann0cbe48c2017-06-14 21:35:34 -07001518 if (XFS_IFORK_Q(ip) &&
Dave Chinnera133d952013-08-12 20:49:48 +10001519 XFS_BMAP_BMDR_SPACE(tip->i_df.if_broot) > XFS_IFORK_BOFF(ip))
Dave Chinner24513372014-06-25 14:58:08 +10001520 return -EINVAL;
Dave Chinnera133d952013-08-12 20:49:48 +10001521 if (XFS_IFORK_NEXTENTS(tip, XFS_DATA_FORK) <=
1522 XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK))
Dave Chinner24513372014-06-25 14:58:08 +10001523 return -EINVAL;
Dave Chinnera133d952013-08-12 20:49:48 +10001524 }
1525
1526 /* Reciprocal target->temp btree format checks */
1527 if (ip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
Arnd Bergmann0cbe48c2017-06-14 21:35:34 -07001528 if (XFS_IFORK_Q(tip) &&
Dave Chinnera133d952013-08-12 20:49:48 +10001529 XFS_BMAP_BMDR_SPACE(ip->i_df.if_broot) > XFS_IFORK_BOFF(tip))
Dave Chinner24513372014-06-25 14:58:08 +10001530 return -EINVAL;
Dave Chinnera133d952013-08-12 20:49:48 +10001531 if (XFS_IFORK_NEXTENTS(ip, XFS_DATA_FORK) <=
1532 XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK))
Dave Chinner24513372014-06-25 14:58:08 +10001533 return -EINVAL;
Dave Chinnera133d952013-08-12 20:49:48 +10001534 }
1535
1536 return 0;
1537}
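/*
 * Illustrative sketch only, not part of this file: the extent-count checks
 * above boil down to a capacity comparison. Here 'nextents' stands in for
 * XFS_IFORK_NEXTENTS() of the inode currently in extent format and
 * 'other_maxext' for XFS_IFORK_MAXEXT() of the inode it would be swapped
 * into; both names are hypothetical. With the 7-vs-6 case from the comment
 * above, 7 > 6 and the swap is rejected.
 */
static inline int
xfs_example_fork_would_overflow(
	unsigned int		nextents,
	unsigned int		other_maxext)
{
	return nextents > other_maxext ? -EINVAL : 0;
}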
1538
Dave Chinner7abbb8f2014-09-23 16:20:11 +10001539static int
Dave Chinner4ef897a2014-08-04 13:44:08 +10001540xfs_swap_extent_flush(
1541 struct xfs_inode *ip)
1542{
1543 int error;
1544
1545 error = filemap_write_and_wait(VFS_I(ip)->i_mapping);
1546 if (error)
1547 return error;
1548 truncate_pagecache_range(VFS_I(ip), 0, -1);
1549
1550 /* Verify O_DIRECT for ftmp */
1551 if (VFS_I(ip)->i_mapping->nrpages)
1552 return -EINVAL;
Dave Chinner4ef897a2014-08-04 13:44:08 +10001553 return 0;
1554}
1555
Darrick J. Wong1f08af52016-10-03 09:11:53 -07001556/*
1557 * Move extents from one file to another, when rmap is enabled.
1558 */
1559STATIC int
1560xfs_swap_extent_rmap(
1561 struct xfs_trans **tpp,
1562 struct xfs_inode *ip,
1563 struct xfs_inode *tip)
1564{
Brian Foster7a7943c2018-07-11 22:26:17 -07001565 struct xfs_trans *tp = *tpp;
1566 struct xfs_mount *mp = tp->t_mountp;
Darrick J. Wong1f08af52016-10-03 09:11:53 -07001567 struct xfs_bmbt_irec irec;
1568 struct xfs_bmbt_irec uirec;
1569 struct xfs_bmbt_irec tirec;
1570 xfs_fileoff_t offset_fsb;
1571 xfs_fileoff_t end_fsb;
1572 xfs_filblks_t count_fsb;
1573 xfs_fsblock_t firstfsb;
Darrick J. Wong1f08af52016-10-03 09:11:53 -07001574 int error;
1575 xfs_filblks_t ilen;
1576 xfs_filblks_t rlen;
1577 int nimaps;
Darrick J. Wongc8ce5402017-06-16 11:00:05 -07001578 uint64_t tip_flags2;
Darrick J. Wong1f08af52016-10-03 09:11:53 -07001579
1580 /*
1581 * If the source file has shared blocks, we must flag the donor
1582 * file as having shared blocks so that we get the shared-block
1583 * rmap functions when we go to fix up the rmaps. The flags
1584 * will be switch for reals later.
1585 */
1586 tip_flags2 = tip->i_d.di_flags2;
1587 if (ip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK)
1588 tip->i_d.di_flags2 |= XFS_DIFLAG2_REFLINK;
1589
1590 offset_fsb = 0;
1591 end_fsb = XFS_B_TO_FSB(ip->i_mount, i_size_read(VFS_I(ip)));
1592 count_fsb = (xfs_filblks_t)(end_fsb - offset_fsb);
1593
1594 while (count_fsb) {
1595 /* Read extent from the donor file */
1596 nimaps = 1;
1597 error = xfs_bmapi_read(tip, offset_fsb, count_fsb, &tirec,
1598 &nimaps, 0);
1599 if (error)
1600 goto out;
1601 ASSERT(nimaps == 1);
1602 ASSERT(tirec.br_startblock != DELAYSTARTBLOCK);
1603
1604 trace_xfs_swap_extent_rmap_remap(tip, &tirec);
1605 ilen = tirec.br_blockcount;
1606
1607 /* Unmap the old blocks in the source file. */
1608 while (tirec.br_blockcount) {
Brian Fosterbcd2c9f2018-07-11 22:26:19 -07001609 xfs_defer_init(tp, tp->t_dfops, &firstfsb);
Darrick J. Wong1f08af52016-10-03 09:11:53 -07001610 trace_xfs_swap_extent_rmap_remap_piece(tip, &tirec);
1611
1612 /* Read extent from the source file */
1613 nimaps = 1;
1614 error = xfs_bmapi_read(ip, tirec.br_startoff,
1615 tirec.br_blockcount, &irec,
1616 &nimaps, 0);
1617 if (error)
1618 goto out_defer;
1619 ASSERT(nimaps == 1);
1620 ASSERT(tirec.br_startoff == irec.br_startoff);
1621 trace_xfs_swap_extent_rmap_remap_piece(ip, &irec);
1622
1623 /* Trim the extent. */
1624 uirec = tirec;
1625 uirec.br_blockcount = rlen = min_t(xfs_filblks_t,
1626 tirec.br_blockcount,
1627 irec.br_blockcount);
1628 trace_xfs_swap_extent_rmap_remap_piece(tip, &uirec);
1629
1630 /* Remove the mapping from the donor file. */
Brian Foster7a7943c2018-07-11 22:26:17 -07001631 error = xfs_bmap_unmap_extent(mp, tp->t_dfops, tip,
1632 &uirec);
Darrick J. Wong1f08af52016-10-03 09:11:53 -07001633 if (error)
1634 goto out_defer;
1635
1636 /* Remove the mapping from the source file. */
Brian Foster7a7943c2018-07-11 22:26:17 -07001637 error = xfs_bmap_unmap_extent(mp, tp->t_dfops, ip,
1638 &irec);
Darrick J. Wong1f08af52016-10-03 09:11:53 -07001639 if (error)
1640 goto out_defer;
1641
1642 /* Map the donor file's blocks into the source file. */
Brian Foster7a7943c2018-07-11 22:26:17 -07001643 error = xfs_bmap_map_extent(mp, tp->t_dfops, ip,
1644 &uirec);
Darrick J. Wong1f08af52016-10-03 09:11:53 -07001645 if (error)
1646 goto out_defer;
1647
1648 /* Map the source file's blocks into the donor file. */
Brian Foster7a7943c2018-07-11 22:26:17 -07001649 error = xfs_bmap_map_extent(mp, tp->t_dfops, tip,
1650 &irec);
Darrick J. Wong1f08af52016-10-03 09:11:53 -07001651 if (error)
1652 goto out_defer;
1653
Brian Foster7a7943c2018-07-11 22:26:17 -07001654 xfs_defer_ijoin(tp->t_dfops, ip);
1655 error = xfs_defer_finish(tpp, tp->t_dfops);
1656 tp = *tpp;
Darrick J. Wong1f08af52016-10-03 09:11:53 -07001657 if (error)
1658 goto out_defer;
1659
1660 tirec.br_startoff += rlen;
1661 if (tirec.br_startblock != HOLESTARTBLOCK &&
1662 tirec.br_startblock != DELAYSTARTBLOCK)
1663 tirec.br_startblock += rlen;
1664 tirec.br_blockcount -= rlen;
1665 }
1666
1667 /* Roll on... */
1668 count_fsb -= ilen;
1669 offset_fsb += ilen;
1670 }
1671
1672 tip->i_d.di_flags2 = tip_flags2;
1673 return 0;
1674
1675out_defer:
Brian Foster7a7943c2018-07-11 22:26:17 -07001676 xfs_defer_cancel(tp->t_dfops);
Darrick J. Wong1f08af52016-10-03 09:11:53 -07001677out:
1678 trace_xfs_swap_extent_rmap_error(ip, error, _RET_IP_);
1679 tip->i_d.di_flags2 = tip_flags2;
1680 return error;
1681}
1682
Darrick J. Wong39aff5f2016-10-03 09:11:53 -07001683/* Swap the extents of two files by swapping data forks. */
1684STATIC int
1685xfs_swap_extent_forks(
1686 struct xfs_trans *tp,
1687 struct xfs_inode *ip,
1688 struct xfs_inode *tip,
1689 int *src_log_flags,
1690 int *target_log_flags)
1691{
1692 struct xfs_ifork tempifp, *ifp, *tifp;
Darrick J. Wonge7f5d5c2017-06-16 11:00:12 -07001693 xfs_filblks_t aforkblks = 0;
1694 xfs_filblks_t taforkblks = 0;
1695 xfs_extnum_t junk;
Darrick J. Wongc8ce5402017-06-16 11:00:05 -07001696 uint64_t tmp;
Darrick J. Wong39aff5f2016-10-03 09:11:53 -07001697 int error;
1698
1699 /*
1700 * Count the number of extended attribute blocks
1701 */
1702 if ( ((XFS_IFORK_Q(ip) != 0) && (ip->i_d.di_anextents > 0)) &&
1703 (ip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)) {
Darrick J. Wonge7f5d5c2017-06-16 11:00:12 -07001704 error = xfs_bmap_count_blocks(tp, ip, XFS_ATTR_FORK, &junk,
Darrick J. Wong39aff5f2016-10-03 09:11:53 -07001705 &aforkblks);
1706 if (error)
1707 return error;
1708 }
1709 if ( ((XFS_IFORK_Q(tip) != 0) && (tip->i_d.di_anextents > 0)) &&
1710 (tip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)) {
Darrick J. Wonge7f5d5c2017-06-16 11:00:12 -07001711 error = xfs_bmap_count_blocks(tp, tip, XFS_ATTR_FORK, &junk,
Darrick J. Wong39aff5f2016-10-03 09:11:53 -07001712 &taforkblks);
1713 if (error)
1714 return error;
1715 }
1716
1717 /*
Brian Foster6fb10d62017-08-29 10:08:39 -07001718 * Btree format (v3) inodes have the inode number stamped in the bmbt
1719 * block headers. We can't start changing the bmbt blocks until the
1720 * inode owner change is logged so recovery does the right thing in the
1721 * event of a crash. Set the owner change log flags now and leave the
1722 * bmbt scan as the last step.
Darrick J. Wong39aff5f2016-10-03 09:11:53 -07001723 */
1724 if (ip->i_d.di_version == 3 &&
Brian Foster6fb10d62017-08-29 10:08:39 -07001725 ip->i_d.di_format == XFS_DINODE_FMT_BTREE)
Darrick J. Wong39aff5f2016-10-03 09:11:53 -07001726 (*target_log_flags) |= XFS_ILOG_DOWNER;
Darrick J. Wong39aff5f2016-10-03 09:11:53 -07001727 if (tip->i_d.di_version == 3 &&
Brian Foster6fb10d62017-08-29 10:08:39 -07001728 tip->i_d.di_format == XFS_DINODE_FMT_BTREE)
Darrick J. Wong39aff5f2016-10-03 09:11:53 -07001729 (*src_log_flags) |= XFS_ILOG_DOWNER;
Darrick J. Wong39aff5f2016-10-03 09:11:53 -07001730
1731 /*
1732 * Swap the data forks of the inodes
1733 */
1734 ifp = &ip->i_df;
1735 tifp = &tip->i_df;
1736 tempifp = *ifp; /* struct copy */
1737 *ifp = *tifp; /* struct copy */
1738 *tifp = tempifp; /* struct copy */
1739
1740 /*
1741 * Fix the on-disk inode values
1742 */
Darrick J. Wongc8ce5402017-06-16 11:00:05 -07001743 tmp = (uint64_t)ip->i_d.di_nblocks;
Darrick J. Wong39aff5f2016-10-03 09:11:53 -07001744 ip->i_d.di_nblocks = tip->i_d.di_nblocks - taforkblks + aforkblks;
1745 tip->i_d.di_nblocks = tmp + taforkblks - aforkblks;
1746
Darrick J. Wongc8ce5402017-06-16 11:00:05 -07001747 tmp = (uint64_t) ip->i_d.di_nextents;
Darrick J. Wong39aff5f2016-10-03 09:11:53 -07001748 ip->i_d.di_nextents = tip->i_d.di_nextents;
1749 tip->i_d.di_nextents = tmp;
1750
Darrick J. Wongc8ce5402017-06-16 11:00:05 -07001751 tmp = (uint64_t) ip->i_d.di_format;
Darrick J. Wong39aff5f2016-10-03 09:11:53 -07001752 ip->i_d.di_format = tip->i_d.di_format;
1753 tip->i_d.di_format = tmp;
1754
1755 /*
1756 * The extents in the source inode could still contain speculative
1757 * preallocation beyond EOF (e.g. the file is open but not modified
1758 * while defrag is in progress). In that case, we need to copy over the
1759 * number of delalloc blocks the data fork in the source inode is
1760 * tracking beyond EOF so that when the fork is truncated away when the
1761 * temporary inode is unlinked we don't underrun the i_delayed_blks
1762 * counter on that inode.
1763 */
1764 ASSERT(tip->i_delayed_blks == 0);
1765 tip->i_delayed_blks = ip->i_delayed_blks;
1766 ip->i_delayed_blks = 0;
1767
1768 switch (ip->i_d.di_format) {
1769 case XFS_DINODE_FMT_EXTENTS:
Darrick J. Wong39aff5f2016-10-03 09:11:53 -07001770 (*src_log_flags) |= XFS_ILOG_DEXT;
1771 break;
1772 case XFS_DINODE_FMT_BTREE:
1773 ASSERT(ip->i_d.di_version < 3 ||
1774 (*src_log_flags & XFS_ILOG_DOWNER));
1775 (*src_log_flags) |= XFS_ILOG_DBROOT;
1776 break;
1777 }
1778
1779 switch (tip->i_d.di_format) {
1780 case XFS_DINODE_FMT_EXTENTS:
Darrick J. Wong39aff5f2016-10-03 09:11:53 -07001781 (*target_log_flags) |= XFS_ILOG_DEXT;
1782 break;
1783 case XFS_DINODE_FMT_BTREE:
1784 (*target_log_flags) |= XFS_ILOG_DBROOT;
1785 ASSERT(tip->i_d.di_version < 3 ||
1786 (*target_log_flags & XFS_ILOG_DOWNER));
1787 break;
1788 }
1789
1790 return 0;
1791}
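/*
 * Illustrative sketch only, not part of this file: the three "struct copy"
 * assignments above are an open-coded value swap of the two data forks, and
 * the same pattern repeats inside the function for di_nblocks, di_nextents
 * and di_format via 'tmp'. A hypothetical helper expressing the same
 * operation:
 */
static inline void
xfs_example_swap_iforks(
	struct xfs_ifork	*a,
	struct xfs_ifork	*b)
{
	struct xfs_ifork	tmp = *a;	/* struct copy */

	*a = *b;				/* struct copy */
	*b = tmp;				/* struct copy */
}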
1792
Brian Foster2dd3d702017-08-29 10:08:40 -07001793/*
1794 * Fix up the owners of the bmbt blocks to refer to the current inode. The
1795 * change owner scan attempts to order all modified buffers in the current
1796 * transaction. In the event of ordered buffer failure, the offending buffer is
1797 * physically logged as a fallback and the scan returns -EAGAIN. We must roll
1798 * the transaction in this case to replenish the fallback log reservation and
1799 * restart the scan. This process repeats until the scan completes.
1800 */
1801static int
1802xfs_swap_change_owner(
1803 struct xfs_trans **tpp,
1804 struct xfs_inode *ip,
1805 struct xfs_inode *tmpip)
1806{
1807 int error;
1808 struct xfs_trans *tp = *tpp;
1809
1810 do {
1811 error = xfs_bmbt_change_owner(tp, ip, XFS_DATA_FORK, ip->i_ino,
1812 NULL);
1813 /* success or fatal error */
1814 if (error != -EAGAIN)
1815 break;
1816
1817 error = xfs_trans_roll(tpp);
1818 if (error)
1819 break;
1820 tp = *tpp;
1821
1822 /*
1823 * Redirty both inodes so they can relog and keep the log tail
1824 * moving forward.
1825 */
1826 xfs_trans_ijoin(tp, ip, 0);
1827 xfs_trans_ijoin(tp, tmpip, 0);
1828 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
1829 xfs_trans_log_inode(tp, tmpip, XFS_ILOG_CORE);
1830 } while (true);
1831
1832 return error;
1833}
1834
Dave Chinner4ef897a2014-08-04 13:44:08 +10001835int
Dave Chinnera133d952013-08-12 20:49:48 +10001836xfs_swap_extents(
Darrick J. Wonge06259a2016-10-03 09:11:52 -07001837 struct xfs_inode *ip, /* target inode */
1838 struct xfs_inode *tip, /* tmp inode */
1839 struct xfs_swapext *sxp)
Dave Chinnera133d952013-08-12 20:49:48 +10001840{
Darrick J. Wonge06259a2016-10-03 09:11:52 -07001841 struct xfs_mount *mp = ip->i_mount;
1842 struct xfs_trans *tp;
Brian Foster7a7943c2018-07-11 22:26:17 -07001843 struct xfs_defer_ops dfops;
Darrick J. Wonge06259a2016-10-03 09:11:52 -07001844 struct xfs_bstat *sbp = &sxp->sx_stat;
Darrick J. Wonge06259a2016-10-03 09:11:52 -07001845 int src_log_flags, target_log_flags;
1846 int error = 0;
Darrick J. Wonge06259a2016-10-03 09:11:52 -07001847 int lock_flags;
Darrick J. Wongf0bc4d12016-10-03 09:11:42 -07001848 struct xfs_ifork *cowfp;
Darrick J. Wongc8ce5402017-06-16 11:00:05 -07001849 uint64_t f;
Brian Foster2dd3d702017-08-29 10:08:40 -07001850 int resblks = 0;
Brian Foster7a7943c2018-07-11 22:26:17 -07001851 xfs_fsblock_t firstfsb;
Dave Chinnera133d952013-08-12 20:49:48 +10001852
Dave Chinnera133d952013-08-12 20:49:48 +10001853 /*
Dave Chinner723cac42015-02-23 21:47:29 +11001854 * Lock the inodes against other IO, page faults and truncate to
 1855	 * begin with. Then we can safely ensure the inodes are flushed and have no
 1856	 * page cache. Once we have done this we can take the ilocks and
1857 * do the rest of the checks.
Dave Chinnera133d952013-08-12 20:49:48 +10001858 */
Christoph Hellwig65523212016-11-30 14:33:25 +11001859 lock_two_nondirectories(VFS_I(ip), VFS_I(tip));
1860 lock_flags = XFS_MMAPLOCK_EXCL;
Darrick J. Wong7c2d2382018-01-26 15:27:33 -08001861 xfs_lock_two_inodes(ip, XFS_MMAPLOCK_EXCL, tip, XFS_MMAPLOCK_EXCL);
Dave Chinnera133d952013-08-12 20:49:48 +10001862
1863 /* Verify that both files have the same format */
Dave Chinnerc19b3b052016-02-09 16:54:58 +11001864 if ((VFS_I(ip)->i_mode & S_IFMT) != (VFS_I(tip)->i_mode & S_IFMT)) {
Dave Chinner24513372014-06-25 14:58:08 +10001865 error = -EINVAL;
Dave Chinnera133d952013-08-12 20:49:48 +10001866 goto out_unlock;
1867 }
1868
1869 /* Verify both files are either real-time or non-realtime */
1870 if (XFS_IS_REALTIME_INODE(ip) != XFS_IS_REALTIME_INODE(tip)) {
Dave Chinner24513372014-06-25 14:58:08 +10001871 error = -EINVAL;
Dave Chinnera133d952013-08-12 20:49:48 +10001872 goto out_unlock;
1873 }
1874
Dave Chinner4ef897a2014-08-04 13:44:08 +10001875 error = xfs_swap_extent_flush(ip);
Dave Chinnera133d952013-08-12 20:49:48 +10001876 if (error)
1877 goto out_unlock;
Dave Chinner4ef897a2014-08-04 13:44:08 +10001878 error = xfs_swap_extent_flush(tip);
1879 if (error)
1880 goto out_unlock;
Dave Chinnera133d952013-08-12 20:49:48 +10001881
Darrick J. Wong1f08af52016-10-03 09:11:53 -07001882 /*
1883 * Extent "swapping" with rmap requires a permanent reservation and
1884 * a block reservation because it's really just a remap operation
1885 * performed with log redo items!
1886 */
1887 if (xfs_sb_version_hasrmapbt(&mp->m_sb)) {
Brian Fosterb3fed432018-03-09 14:01:58 -08001888 int w = XFS_DATA_FORK;
1889 uint32_t ipnext = XFS_IFORK_NEXTENTS(ip, w);
1890 uint32_t tipnext = XFS_IFORK_NEXTENTS(tip, w);
1891
Darrick J. Wong1f08af52016-10-03 09:11:53 -07001892 /*
Brian Fosterb3fed432018-03-09 14:01:58 -08001893 * Conceptually this shouldn't affect the shape of either bmbt,
1894 * but since we atomically move extents one by one, we reserve
1895 * enough space to rebuild both trees.
Darrick J. Wong1f08af52016-10-03 09:11:53 -07001896 */
Brian Fosterb3fed432018-03-09 14:01:58 -08001897 resblks = XFS_SWAP_RMAP_SPACE_RES(mp, ipnext, w);
1898 resblks += XFS_SWAP_RMAP_SPACE_RES(mp, tipnext, w);
1899
1900 /*
1901 * Handle the corner case where either inode might straddle the
1902 * btree format boundary. If so, the inode could bounce between
1903 * btree <-> extent format on unmap -> remap cycles, freeing and
1904 * allocating a bmapbt block each time.
1905 */
1906 if (ipnext == (XFS_IFORK_MAXEXT(ip, w) + 1))
1907 resblks += XFS_IFORK_MAXEXT(ip, w);
1908 if (tipnext == (XFS_IFORK_MAXEXT(tip, w) + 1))
1909 resblks += XFS_IFORK_MAXEXT(tip, w);
Brian Foster2dd3d702017-08-29 10:08:40 -07001910 }
1911 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, 0, &tp);
Christoph Hellwig253f4912016-04-06 09:19:55 +10001912 if (error)
Dave Chinnera133d952013-08-12 20:49:48 +10001913 goto out_unlock;
Brian Fosterbcd2c9f2018-07-11 22:26:19 -07001914 xfs_defer_init(tp, &dfops, &firstfsb);
Dave Chinner723cac42015-02-23 21:47:29 +11001915
1916 /*
 1917	 * Lock and join the inodes to the transaction so that transaction commit
1918 * or cancel will unlock the inodes from this point onwards.
1919 */
Darrick J. Wong7c2d2382018-01-26 15:27:33 -08001920 xfs_lock_two_inodes(ip, XFS_ILOCK_EXCL, tip, XFS_ILOCK_EXCL);
Dave Chinner4ef897a2014-08-04 13:44:08 +10001921 lock_flags |= XFS_ILOCK_EXCL;
Darrick J. Wong39aff5f2016-10-03 09:11:53 -07001922 xfs_trans_ijoin(tp, ip, 0);
1923 xfs_trans_ijoin(tp, tip, 0);
Dave Chinner723cac42015-02-23 21:47:29 +11001924
Dave Chinnera133d952013-08-12 20:49:48 +10001925
1926 /* Verify all data are being swapped */
1927 if (sxp->sx_offset != 0 ||
1928 sxp->sx_length != ip->i_d.di_size ||
1929 sxp->sx_length != tip->i_d.di_size) {
Dave Chinner24513372014-06-25 14:58:08 +10001930 error = -EFAULT;
Dave Chinner4ef897a2014-08-04 13:44:08 +10001931 goto out_trans_cancel;
Dave Chinnera133d952013-08-12 20:49:48 +10001932 }
1933
1934 trace_xfs_swap_extent_before(ip, 0);
1935 trace_xfs_swap_extent_before(tip, 1);
1936
1937 /* check inode formats now that data is flushed */
1938 error = xfs_swap_extents_check_format(ip, tip);
1939 if (error) {
1940 xfs_notice(mp,
1941 "%s: inode 0x%llx format is incompatible for exchanging.",
1942 __func__, ip->i_ino);
Dave Chinner4ef897a2014-08-04 13:44:08 +10001943 goto out_trans_cancel;
Dave Chinnera133d952013-08-12 20:49:48 +10001944 }
1945
1946 /*
 1947	 * Compare the current change & modify times with those
 1948	 * passed in. If they differ, we abort this swap.
 1949	 * This is the mechanism used to assure the calling
 1950	 * process that the file was not changed out from
1951 * under it.
1952 */
1953 if ((sbp->bs_ctime.tv_sec != VFS_I(ip)->i_ctime.tv_sec) ||
1954 (sbp->bs_ctime.tv_nsec != VFS_I(ip)->i_ctime.tv_nsec) ||
1955 (sbp->bs_mtime.tv_sec != VFS_I(ip)->i_mtime.tv_sec) ||
1956 (sbp->bs_mtime.tv_nsec != VFS_I(ip)->i_mtime.tv_nsec)) {
Dave Chinner24513372014-06-25 14:58:08 +10001957 error = -EBUSY;
Dave Chinner81217682014-08-04 13:29:32 +10001958 goto out_trans_cancel;
Dave Chinnera133d952013-08-12 20:49:48 +10001959 }
Dave Chinnera133d952013-08-12 20:49:48 +10001960
Dave Chinner21b5c972013-08-30 10:23:44 +10001961 /*
Dave Chinner21b5c972013-08-30 10:23:44 +10001962 * Note the trickiness in setting the log flags - we set the owner log
1963 * flag on the opposite inode (i.e. the inode we are setting the new
1964 * owner to be) because once we swap the forks and log that, log
1965 * recovery is going to see the fork as owned by the swapped inode,
1966 * not the pre-swapped inodes.
1967 */
1968 src_log_flags = XFS_ILOG_CORE;
1969 target_log_flags = XFS_ILOG_CORE;
Dave Chinner21b5c972013-08-30 10:23:44 +10001970
Darrick J. Wong1f08af52016-10-03 09:11:53 -07001971 if (xfs_sb_version_hasrmapbt(&mp->m_sb))
1972 error = xfs_swap_extent_rmap(&tp, ip, tip);
1973 else
1974 error = xfs_swap_extent_forks(tp, ip, tip, &src_log_flags,
1975 &target_log_flags);
Darrick J. Wong39aff5f2016-10-03 09:11:53 -07001976 if (error)
1977 goto out_trans_cancel;
Dave Chinnera133d952013-08-12 20:49:48 +10001978
Darrick J. Wongf0bc4d12016-10-03 09:11:42 -07001979 /* Do we have to swap reflink flags? */
1980 if ((ip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK) ^
1981 (tip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK)) {
1982 f = ip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK;
1983 ip->i_d.di_flags2 &= ~XFS_DIFLAG2_REFLINK;
1984 ip->i_d.di_flags2 |= tip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK;
1985 tip->i_d.di_flags2 &= ~XFS_DIFLAG2_REFLINK;
1986 tip->i_d.di_flags2 |= f & XFS_DIFLAG2_REFLINK;
Darrick J. Wong52bfcdd72017-09-18 09:41:18 -07001987 }
1988
1989 /* Swap the cow forks. */
1990 if (xfs_sb_version_hasreflink(&mp->m_sb)) {
1991 xfs_extnum_t extnum;
1992
1993 ASSERT(ip->i_cformat == XFS_DINODE_FMT_EXTENTS);
1994 ASSERT(tip->i_cformat == XFS_DINODE_FMT_EXTENTS);
1995
1996 extnum = ip->i_cnextents;
1997 ip->i_cnextents = tip->i_cnextents;
1998 tip->i_cnextents = extnum;
1999
Darrick J. Wongf0bc4d12016-10-03 09:11:42 -07002000 cowfp = ip->i_cowfp;
2001 ip->i_cowfp = tip->i_cowfp;
2002 tip->i_cowfp = cowfp;
Darrick J. Wong52bfcdd72017-09-18 09:41:18 -07002003
Christoph Hellwig5bcffe32018-03-13 23:15:30 -07002004 if (ip->i_cowfp && ip->i_cowfp->if_bytes)
Darrick J. Wong52bfcdd72017-09-18 09:41:18 -07002005 xfs_inode_set_cowblocks_tag(ip);
2006 else
2007 xfs_inode_clear_cowblocks_tag(ip);
Christoph Hellwig5bcffe32018-03-13 23:15:30 -07002008 if (tip->i_cowfp && tip->i_cowfp->if_bytes)
Darrick J. Wong52bfcdd72017-09-18 09:41:18 -07002009 xfs_inode_set_cowblocks_tag(tip);
2010 else
2011 xfs_inode_clear_cowblocks_tag(tip);
Darrick J. Wongf0bc4d12016-10-03 09:11:42 -07002012 }
2013
Dave Chinnera133d952013-08-12 20:49:48 +10002014 xfs_trans_log_inode(tp, ip, src_log_flags);
2015 xfs_trans_log_inode(tp, tip, target_log_flags);
2016
2017 /*
Brian Foster6fb10d62017-08-29 10:08:39 -07002018 * The extent forks have been swapped, but crc=1,rmapbt=0 filesystems
2019 * have inode number owner values in the bmbt blocks that still refer to
2020 * the old inode. Scan each bmbt to fix up the owner values with the
2021 * inode number of the current inode.
2022 */
2023 if (src_log_flags & XFS_ILOG_DOWNER) {
Brian Foster2dd3d702017-08-29 10:08:40 -07002024 error = xfs_swap_change_owner(&tp, ip, tip);
Brian Foster6fb10d62017-08-29 10:08:39 -07002025 if (error)
2026 goto out_trans_cancel;
2027 }
2028 if (target_log_flags & XFS_ILOG_DOWNER) {
Brian Foster2dd3d702017-08-29 10:08:40 -07002029 error = xfs_swap_change_owner(&tp, tip, ip);
Brian Foster6fb10d62017-08-29 10:08:39 -07002030 if (error)
2031 goto out_trans_cancel;
2032 }
2033
2034 /*
Dave Chinnera133d952013-08-12 20:49:48 +10002035 * If this is a synchronous mount, make sure that the
2036 * transaction goes to disk before returning to the user.
2037 */
2038 if (mp->m_flags & XFS_MOUNT_WSYNC)
2039 xfs_trans_set_sync(tp);
2040
Christoph Hellwig70393312015-06-04 13:48:08 +10002041 error = xfs_trans_commit(tp);
Dave Chinnera133d952013-08-12 20:49:48 +10002042
2043 trace_xfs_swap_extent_after(ip, 0);
2044 trace_xfs_swap_extent_after(tip, 1);
Darrick J. Wong39aff5f2016-10-03 09:11:53 -07002045
Christoph Hellwig65523212016-11-30 14:33:25 +11002046out_unlock:
Darrick J. Wong39aff5f2016-10-03 09:11:53 -07002047 xfs_iunlock(ip, lock_flags);
2048 xfs_iunlock(tip, lock_flags);
Christoph Hellwig65523212016-11-30 14:33:25 +11002049 unlock_two_nondirectories(VFS_I(ip), VFS_I(tip));
Dave Chinnera133d952013-08-12 20:49:48 +10002050 return error;
2051
Darrick J. Wong39aff5f2016-10-03 09:11:53 -07002052out_trans_cancel:
2053 xfs_trans_cancel(tp);
Christoph Hellwig65523212016-11-30 14:33:25 +11002054 goto out_unlock;
Dave Chinnera133d952013-08-12 20:49:48 +10002055}
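
/*
 * Illustrative sketch only, not part of this file: xfs_swap_extents() is
 * driven from userspace (the xfs_fsr defragmenter) via the XFS_IOC_SWAPEXT
 * ioctl. The fragment below is hypothetical; it assumes the xfsprogs
 * headers (<xfs/xfs.h>) provide struct xfs_swapext and XFS_IOC_SWAPEXT,
 * that fd_target and fd_tmp are open on the same XFS filesystem, that the
 * temporary file already holds a defragmented copy of the entire target
 * (sx_offset must be 0 and sx_length must equal the size of both files, or
 * the kernel returns -EFAULT), and that 'bstat' was filled from a bulkstat
 * of the target taken after the copy, so the bs_ctime/bs_mtime comparison
 * above can fail with -EBUSY if the file changed in the meantime.
 */
#include <string.h>
#include <sys/ioctl.h>
#include <sys/types.h>
#include <xfs/xfs.h>

static int
request_extent_swap(
	int			fd_target,
	int			fd_tmp,
	off_t			length,
	const struct xfs_bstat	*bstat)
{
	struct xfs_swapext	sx;

	memset(&sx, 0, sizeof(sx));
	sx.sx_version = XFS_SX_VERSION;
	sx.sx_fdtarget = fd_target;
	sx.sx_fdtmp = fd_tmp;
	sx.sx_offset = 0;
	sx.sx_length = length;
	sx.sx_stat = *bstat;

	return ioctl(fd_target, XFS_IOC_SWAPEXT, &sx);
}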