/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * Copyright (c) 2012 Red Hat, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_mount.h"
#include "xfs_da_format.h"
#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_trans.h"
#include "xfs_extfree_item.h"
#include "xfs_alloc.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_bmap_btree.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_quota.h"
#include "xfs_trans_space.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_log.h"
#include "xfs_rmap_btree.h"
#include "xfs_iomap.h"
#include "xfs_reflink.h"
#include "xfs_refcount.h"

/* Kernel only BMAP related definitions and functions */

/*
 * Convert the given file system block to a disk block.  We have to treat it
 * differently based on whether the file is a real time file or not, because the
 * bmap code does.
 */
xfs_daddr_t
xfs_fsb_to_db(struct xfs_inode *ip, xfs_fsblock_t fsb)
{
	return (XFS_IS_REALTIME_INODE(ip) ? \
		 (xfs_daddr_t)XFS_FSB_TO_BB((ip)->i_mount, (fsb)) : \
		 XFS_FSB_TO_DADDR((ip)->i_mount, (fsb)));
}

/*
 * Routine to zero an extent on disk allocated to the specific inode.
 *
 * The VFS functions take a linearised filesystem block offset, so we have to
 * convert the sparse xfs fsb to the right format first.
 * VFS types are real funky, too.
 */
int
xfs_zero_extent(
	struct xfs_inode	*ip,
	xfs_fsblock_t		start_fsb,
	xfs_off_t		count_fsb)
{
	struct xfs_mount	*mp = ip->i_mount;
	xfs_daddr_t		sector = xfs_fsb_to_db(ip, start_fsb);
	sector_t		block = XFS_BB_TO_FSBT(mp, sector);

	return blkdev_issue_zeroout(xfs_find_bdev_for_inode(VFS_I(ip)),
		block << (mp->m_super->s_blocksize_bits - 9),
		count_fsb << (mp->m_super->s_blocksize_bits - 9),
		GFP_NOFS, 0);
}

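/*
 * Allocate an extent for a realtime file.  The request in @ap is aligned to
 * the realtime extent size, the RT bitmap and summary inodes are locked and
 * joined to the transaction, and the space is allocated through
 * xfs_rtallocate_extent().  On success the inode block count and quota are
 * updated, and the extent is zeroed if the caller asked for zeroed user data.
 */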
int
xfs_bmap_rtalloc(
	struct xfs_bmalloca	*ap)	/* bmap alloc argument struct */
{
	int		error;		/* error return value */
	xfs_mount_t	*mp;		/* mount point structure */
	xfs_extlen_t	prod = 0;	/* product factor for allocators */
	xfs_extlen_t	ralen = 0;	/* realtime allocation length */
	xfs_extlen_t	align;		/* minimum allocation alignment */
	xfs_rtblock_t	rtb;

	mp = ap->ip->i_mount;
	align = xfs_get_extsz_hint(ap->ip);
	prod = align / mp->m_sb.sb_rextsize;
	error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev,
					align, 1, ap->eof, 0,
					ap->conv, &ap->offset, &ap->length);
	if (error)
		return error;
	ASSERT(ap->length);
	ASSERT(ap->length % mp->m_sb.sb_rextsize == 0);

	/*
	 * If the offset & length are not perfectly aligned
	 * then kill prod, it will just get us in trouble.
	 */
	if (do_mod(ap->offset, align) || ap->length % align)
		prod = 1;
	/*
	 * Set ralen to be the actual requested length in rtextents.
	 */
	ralen = ap->length / mp->m_sb.sb_rextsize;
	/*
	 * If the old value was close enough to MAXEXTLEN that
	 * we rounded up to it, cut it back so it's valid again.
	 * Note that if it's a really large request (bigger than
	 * MAXEXTLEN), we don't hear about that number, and can't
	 * adjust the starting point to match it.
	 */
	if (ralen * mp->m_sb.sb_rextsize >= MAXEXTLEN)
		ralen = MAXEXTLEN / mp->m_sb.sb_rextsize;

	/*
	 * Lock out modifications to both the RT bitmap and summary inodes
	 */
	xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL|XFS_ILOCK_RTBITMAP);
	xfs_trans_ijoin(ap->tp, mp->m_rbmip, XFS_ILOCK_EXCL);
	xfs_ilock(mp->m_rsumip, XFS_ILOCK_EXCL|XFS_ILOCK_RTSUM);
	xfs_trans_ijoin(ap->tp, mp->m_rsumip, XFS_ILOCK_EXCL);

	/*
	 * If it's an allocation to an empty file at offset 0,
	 * pick an extent that will space things out in the rt area.
	 */
	if (ap->eof && ap->offset == 0) {
		xfs_rtblock_t uninitialized_var(rtx); /* realtime extent no */

		error = xfs_rtpick_extent(mp, ap->tp, ralen, &rtx);
		if (error)
			return error;
		ap->blkno = rtx * mp->m_sb.sb_rextsize;
	} else {
		ap->blkno = 0;
	}

	xfs_bmap_adjacent(ap);

	/*
	 * Realtime allocation, done through xfs_rtallocate_extent.
	 */
	do_div(ap->blkno, mp->m_sb.sb_rextsize);
	rtb = ap->blkno;
	ap->length = ralen;
	error = xfs_rtallocate_extent(ap->tp, ap->blkno, 1, ap->length,
				&ralen, ap->wasdel, prod, &rtb);
	if (error)
		return error;

	ap->blkno = rtb;
	if (ap->blkno != NULLFSBLOCK) {
		ap->blkno *= mp->m_sb.sb_rextsize;
		ralen *= mp->m_sb.sb_rextsize;
		ap->length = ralen;
		ap->ip->i_d.di_nblocks += ralen;
		xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE);
		if (ap->wasdel)
			ap->ip->i_delayed_blks -= ralen;
		/*
		 * Adjust the disk quota also. This was reserved
		 * earlier.
		 */
		xfs_trans_mod_dquot_byino(ap->tp, ap->ip,
			ap->wasdel ? XFS_TRANS_DQ_DELRTBCOUNT :
					XFS_TRANS_DQ_RTBCOUNT, (long) ralen);

		/* Zero the extent if we were asked to do so */
		if (ap->datatype & XFS_ALLOC_USERDATA_ZERO) {
			error = xfs_zero_extent(ap->ip, ap->blkno, ap->length);
			if (error)
				return error;
		}
	} else {
		ap->length = 0;
	}
	return 0;
}

/*
 * Check if the endoff is outside the last extent. If so the caller will grow
 * the allocation to a stripe unit boundary.  All offsets are considered outside
 * the end of file for an empty fork, so 1 is returned in *eof in that case.
 */
int
xfs_bmap_eof(
	struct xfs_inode	*ip,
	xfs_fileoff_t		endoff,
	int			whichfork,
	int			*eof)
{
	struct xfs_bmbt_irec	rec;
	int			error;

	error = xfs_bmap_last_extent(NULL, ip, whichfork, &rec, eof);
	if (error || *eof)
		return error;

	*eof = endoff >= rec.br_startoff + rec.br_blockcount;
	return 0;
}

/*
 * Extent tree block counting routines.
 */

/*
 * Count leaf blocks given a range of extent records.  Delayed allocation
 * extents are not counted towards the totals.
 */
STATIC void
xfs_bmap_count_leaves(
	struct xfs_ifork	*ifp,
	xfs_extnum_t		*numrecs,
	xfs_filblks_t		*count)
{
	xfs_extnum_t		i;
	xfs_extnum_t		nr_exts = xfs_iext_count(ifp);

	for (i = 0; i < nr_exts; i++) {
		xfs_bmbt_rec_host_t *frp = xfs_iext_get_ext(ifp, i);
		if (!isnullstartblock(xfs_bmbt_get_startblock(frp))) {
			(*numrecs)++;
			*count += xfs_bmbt_get_blockcount(frp);
		}
	}
}

/*
 * Count leaf blocks given a range of extent records originally
 * in btree format.
 */
STATIC void
xfs_bmap_disk_count_leaves(
	struct xfs_mount	*mp,
	struct xfs_btree_block	*block,
	int			numrecs,
	xfs_filblks_t		*count)
{
	int			b;
	xfs_bmbt_rec_t		*frp;

	for (b = 1; b <= numrecs; b++) {
		frp = XFS_BMBT_REC_ADDR(mp, block, b);
		*count += xfs_bmbt_disk_get_blockcount(frp);
	}
}

/*
 * Recursively walks each level of a btree
 * to count total fsblocks in use.
 */
STATIC int
xfs_bmap_count_tree(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct xfs_ifork	*ifp,
	xfs_fsblock_t		blockno,
	int			levelin,
	xfs_extnum_t		*nextents,
	xfs_filblks_t		*count)
{
	int			error;
	struct xfs_buf		*bp, *nbp;
	int			level = levelin;
	__be64			*pp;
	xfs_fsblock_t		bno = blockno;
	xfs_fsblock_t		nextbno;
	struct xfs_btree_block	*block, *nextblock;
	int			numrecs;

	error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp, XFS_BMAP_BTREE_REF,
						&xfs_bmbt_buf_ops);
	if (error)
		return error;
	*count += 1;
	block = XFS_BUF_TO_BLOCK(bp);

	if (--level) {
		/* Not at node above leaves, count this level of nodes */
		nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
		while (nextbno != NULLFSBLOCK) {
			error = xfs_btree_read_bufl(mp, tp, nextbno, 0, &nbp,
						XFS_BMAP_BTREE_REF,
						&xfs_bmbt_buf_ops);
			if (error)
				return error;
			*count += 1;
			nextblock = XFS_BUF_TO_BLOCK(nbp);
			nextbno = be64_to_cpu(nextblock->bb_u.l.bb_rightsib);
			xfs_trans_brelse(tp, nbp);
		}

		/* Dive to the next level */
		pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
		bno = be64_to_cpu(*pp);
		error = xfs_bmap_count_tree(mp, tp, ifp, bno, level, nextents,
				count);
		if (error) {
			xfs_trans_brelse(tp, bp);
			XFS_ERROR_REPORT("xfs_bmap_count_tree(1)",
					XFS_ERRLEVEL_LOW, mp);
			return -EFSCORRUPTED;
		}
		xfs_trans_brelse(tp, bp);
	} else {
		/* count all level 1 nodes and their leaves */
		for (;;) {
			nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
			numrecs = be16_to_cpu(block->bb_numrecs);
			(*nextents) += numrecs;
			xfs_bmap_disk_count_leaves(mp, block, numrecs, count);
			xfs_trans_brelse(tp, bp);
			if (nextbno == NULLFSBLOCK)
				break;
			bno = nextbno;
			error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp,
						XFS_BMAP_BTREE_REF,
						&xfs_bmbt_buf_ops);
			if (error)
				return error;
			*count += 1;
			block = XFS_BUF_TO_BLOCK(bp);
		}
	}
	return 0;
}

/*
 * Count fsblocks of the given fork.  Delayed allocation extents are
 * not counted towards the totals.
 */
int
xfs_bmap_count_blocks(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int			whichfork,
	xfs_extnum_t		*nextents,
	xfs_filblks_t		*count)
{
	struct xfs_mount	*mp;	/* file system mount structure */
	__be64			*pp;	/* pointer to block address */
	struct xfs_btree_block	*block;	/* current btree block */
	struct xfs_ifork	*ifp;	/* fork structure */
	xfs_fsblock_t		bno;	/* block # of "block" */
	int			level;	/* btree level, for checking */
	int			error;

	bno = NULLFSBLOCK;
	mp = ip->i_mount;
	*nextents = 0;
	*count = 0;
	ifp = XFS_IFORK_PTR(ip, whichfork);
	if (!ifp)
		return 0;

	switch (XFS_IFORK_FORMAT(ip, whichfork)) {
	case XFS_DINODE_FMT_EXTENTS:
		xfs_bmap_count_leaves(ifp, nextents, count);
		return 0;
	case XFS_DINODE_FMT_BTREE:
		if (!(ifp->if_flags & XFS_IFEXTENTS)) {
			error = xfs_iread_extents(tp, ip, whichfork);
			if (error)
				return error;
		}

		/*
		 * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
		 */
		block = ifp->if_broot;
		level = be16_to_cpu(block->bb_level);
		ASSERT(level > 0);
		pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
		bno = be64_to_cpu(*pp);
		ASSERT(bno != NULLFSBLOCK);
		ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount);
		ASSERT(XFS_FSB_TO_AGBNO(mp, bno) < mp->m_sb.sb_agblocks);

		error = xfs_bmap_count_tree(mp, tp, ifp, bno, level,
				nextents, count);
		if (error) {
			XFS_ERROR_REPORT("xfs_bmap_count_blocks(2)",
					XFS_ERRLEVEL_LOW, mp);
			return -EFSCORRUPTED;
		}
		return 0;
	}

	return 0;
}

/*
 * returns 1 for success, 0 if we failed to map the extent.
 */
STATIC int
xfs_getbmapx_fix_eof_hole(
	xfs_inode_t		*ip,		/* xfs incore inode pointer */
	int			whichfork,
	struct getbmapx		*out,		/* output structure */
	int			prealloced,	/* this is a file with
						 * preallocated data space */
	int64_t			end,		/* last block requested */
	xfs_fsblock_t		startblock,
	bool			moretocome)
{
	int64_t			fixlen;
	xfs_mount_t		*mp;		/* file system mount point */
	xfs_ifork_t		*ifp;		/* inode fork pointer */
	xfs_extnum_t		lastx;		/* last extent pointer */
	xfs_fileoff_t		fileblock;

	if (startblock == HOLESTARTBLOCK) {
		mp = ip->i_mount;
		out->bmv_block = -1;
		fixlen = XFS_FSB_TO_BB(mp, XFS_B_TO_FSB(mp, XFS_ISIZE(ip)));
		fixlen -= out->bmv_offset;
		if (prealloced && out->bmv_offset + out->bmv_length == end) {
			/* Came to hole at EOF. Trim it. */
			if (fixlen <= 0)
				return 0;
			out->bmv_length = fixlen;
		}
	} else {
		if (startblock == DELAYSTARTBLOCK)
			out->bmv_block = -2;
		else
			out->bmv_block = xfs_fsb_to_db(ip, startblock);
		fileblock = XFS_BB_TO_FSB(ip->i_mount, out->bmv_offset);
		ifp = XFS_IFORK_PTR(ip, whichfork);
		if (!moretocome &&
		    xfs_iext_bno_to_ext(ifp, fileblock, &lastx) &&
		   (lastx == xfs_iext_count(ifp) - 1))
			out->bmv_oflags |= BMV_OF_LAST;
	}

	return 1;
}

/* Adjust the reported bmap around shared/unshared extent transitions. */
STATIC int
xfs_getbmap_adjust_shared(
	struct xfs_inode		*ip,
	int				whichfork,
	struct xfs_bmbt_irec		*map,
	struct getbmapx			*out,
	struct xfs_bmbt_irec		*next_map)
{
	struct xfs_mount		*mp = ip->i_mount;
	xfs_agnumber_t			agno;
	xfs_agblock_t			agbno;
	xfs_agblock_t			ebno;
	xfs_extlen_t			elen;
	xfs_extlen_t			nlen;
	int				error;

	next_map->br_startblock = NULLFSBLOCK;
	next_map->br_startoff = NULLFILEOFF;
	next_map->br_blockcount = 0;

	/* Only written data blocks can be shared. */
	if (!xfs_is_reflink_inode(ip) ||
	    whichfork != XFS_DATA_FORK ||
	    !xfs_bmap_is_real_extent(map))
		return 0;

	agno = XFS_FSB_TO_AGNO(mp, map->br_startblock);
	agbno = XFS_FSB_TO_AGBNO(mp, map->br_startblock);
	error = xfs_reflink_find_shared(mp, NULL, agno, agbno,
			map->br_blockcount, &ebno, &elen, true);
	if (error)
		return error;

	if (ebno == NULLAGBLOCK) {
		/* No shared blocks at all. */
		return 0;
	} else if (agbno == ebno) {
		/*
		 * Shared extent at (agbno, elen).  Shrink the reported
		 * extent length and prepare to move the start of map[i]
		 * to agbno+elen, with the aim of (re)formatting the new
		 * map[i] the next time through the inner loop.
		 */
		out->bmv_length = XFS_FSB_TO_BB(mp, elen);
		out->bmv_oflags |= BMV_OF_SHARED;
		if (elen != map->br_blockcount) {
			*next_map = *map;
			next_map->br_startblock += elen;
			next_map->br_startoff += elen;
			next_map->br_blockcount -= elen;
		}
		map->br_blockcount -= elen;
	} else {
		/*
		 * There's an unshared extent (agbno, ebno - agbno)
		 * followed by shared extent at (ebno, elen).  Shrink
		 * the reported extent length to cover only the unshared
		 * extent and prepare to move up the start of map[i] to
		 * ebno, with the aim of (re)formatting the new map[i]
		 * the next time through the inner loop.
		 */
		*next_map = *map;
		nlen = ebno - agbno;
		out->bmv_length = XFS_FSB_TO_BB(mp, nlen);
		next_map->br_startblock += nlen;
		next_map->br_startoff += nlen;
		next_map->br_blockcount -= nlen;
		map->br_blockcount -= nlen;
	}

	return 0;
}
527
Dave Chinner68988112013-08-12 20:49:42 +1000528/*
529 * Get inode's extents as described in bmv, and format for output.
530 * Calls formatter to fill the user's buffer until all extents
531 * are mapped, until the passed-in bmv->bmv_count slots have
532 * been filled, or until the formatter short-circuits the loop,
533 * if it is tracking filled-in extents on its own.
534 */
535int /* error code */
536xfs_getbmap(
537 xfs_inode_t *ip,
538 struct getbmapx *bmv, /* user bmap structure */
539 xfs_bmap_format_t formatter, /* format to user */
540 void *arg) /* formatter arg */
541{
Darrick J. Wongc8ce5402017-06-16 11:00:05 -0700542 int64_t bmvend; /* last block requested */
Dave Chinner68988112013-08-12 20:49:42 +1000543 int error = 0; /* return value */
Darrick J. Wongc8ce5402017-06-16 11:00:05 -0700544 int64_t fixlen; /* length for -1 case */
Dave Chinner68988112013-08-12 20:49:42 +1000545 int i; /* extent number */
546 int lock; /* lock state */
547 xfs_bmbt_irec_t *map; /* buffer for user's data */
548 xfs_mount_t *mp; /* file system mount point */
549 int nex; /* # of user extents can do */
Dave Chinner68988112013-08-12 20:49:42 +1000550 int subnex; /* # of bmapi's can do */
551 int nmap; /* number of map entries */
552 struct getbmapx *out; /* output structure */
553 int whichfork; /* data or attr fork */
554 int prealloced; /* this is a file with
555 * preallocated data space */
556 int iflags; /* interface flags */
557 int bmapi_flags; /* flags for xfs_bmapi */
558 int cur_ext = 0;
Darrick J. Wongf86f4032016-10-03 09:11:41 -0700559 struct xfs_bmbt_irec inject_map;
Dave Chinner68988112013-08-12 20:49:42 +1000560
561 mp = ip->i_mount;
562 iflags = bmv->bmv_iflags;
Dave Chinner68988112013-08-12 20:49:42 +1000563
Darrick J. Wongf86f4032016-10-03 09:11:41 -0700564#ifndef DEBUG
565 /* Only allow CoW fork queries if we're debugging. */
566 if (iflags & BMV_IF_COWFORK)
567 return -EINVAL;
568#endif
569 if ((iflags & BMV_IF_ATTRFORK) && (iflags & BMV_IF_COWFORK))
570 return -EINVAL;
571
572 if (iflags & BMV_IF_ATTRFORK)
573 whichfork = XFS_ATTR_FORK;
574 else if (iflags & BMV_IF_COWFORK)
575 whichfork = XFS_COW_FORK;
576 else
577 whichfork = XFS_DATA_FORK;
578
579 switch (whichfork) {
580 case XFS_ATTR_FORK:
Dave Chinner68988112013-08-12 20:49:42 +1000581 if (XFS_IFORK_Q(ip)) {
582 if (ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS &&
583 ip->i_d.di_aformat != XFS_DINODE_FMT_BTREE &&
584 ip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)
Dave Chinner24513372014-06-25 14:58:08 +1000585 return -EINVAL;
Dave Chinner68988112013-08-12 20:49:42 +1000586 } else if (unlikely(
587 ip->i_d.di_aformat != 0 &&
588 ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS)) {
589 XFS_ERROR_REPORT("xfs_getbmap", XFS_ERRLEVEL_LOW,
590 ip->i_mount);
Dave Chinner24513372014-06-25 14:58:08 +1000591 return -EFSCORRUPTED;
Dave Chinner68988112013-08-12 20:49:42 +1000592 }
593
594 prealloced = 0;
595 fixlen = 1LL << 32;
Darrick J. Wongf86f4032016-10-03 09:11:41 -0700596 break;
597 case XFS_COW_FORK:
598 if (ip->i_cformat != XFS_DINODE_FMT_EXTENTS)
599 return -EINVAL;
600
Darrick J. Wongf7ca3522016-10-03 09:11:43 -0700601 if (xfs_get_cowextsz_hint(ip)) {
602 prealloced = 1;
603 fixlen = mp->m_super->s_maxbytes;
604 } else {
605 prealloced = 0;
606 fixlen = XFS_ISIZE(ip);
607 }
Darrick J. Wongf86f4032016-10-03 09:11:41 -0700608 break;
609 default:
Darrick J. Wong6eadbf42017-05-12 10:44:08 -0700610 /* Local format data forks report no extents. */
611 if (ip->i_d.di_format == XFS_DINODE_FMT_LOCAL) {
612 bmv->bmv_entries = 0;
613 return 0;
614 }
Dave Chinner68988112013-08-12 20:49:42 +1000615 if (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS &&
Darrick J. Wong6eadbf42017-05-12 10:44:08 -0700616 ip->i_d.di_format != XFS_DINODE_FMT_BTREE)
Dave Chinner24513372014-06-25 14:58:08 +1000617 return -EINVAL;
Dave Chinner68988112013-08-12 20:49:42 +1000618
619 if (xfs_get_extsz_hint(ip) ||
620 ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC|XFS_DIFLAG_APPEND)){
621 prealloced = 1;
622 fixlen = mp->m_super->s_maxbytes;
623 } else {
624 prealloced = 0;
625 fixlen = XFS_ISIZE(ip);
626 }
Darrick J. Wongf86f4032016-10-03 09:11:41 -0700627 break;
Dave Chinner68988112013-08-12 20:49:42 +1000628 }
629
630 if (bmv->bmv_length == -1) {
631 fixlen = XFS_FSB_TO_BB(mp, XFS_B_TO_FSB(mp, fixlen));
632 bmv->bmv_length =
Darrick J. Wongc8ce5402017-06-16 11:00:05 -0700633 max_t(int64_t, fixlen - bmv->bmv_offset, 0);
Dave Chinner68988112013-08-12 20:49:42 +1000634 } else if (bmv->bmv_length == 0) {
635 bmv->bmv_entries = 0;
636 return 0;
637 } else if (bmv->bmv_length < 0) {
Dave Chinner24513372014-06-25 14:58:08 +1000638 return -EINVAL;
Dave Chinner68988112013-08-12 20:49:42 +1000639 }
640
641 nex = bmv->bmv_count - 1;
642 if (nex <= 0)
Dave Chinner24513372014-06-25 14:58:08 +1000643 return -EINVAL;
Dave Chinner68988112013-08-12 20:49:42 +1000644 bmvend = bmv->bmv_offset + bmv->bmv_length;
645
646
647 if (bmv->bmv_count > ULONG_MAX / sizeof(struct getbmapx))
Dave Chinner24513372014-06-25 14:58:08 +1000648 return -ENOMEM;
Dave Chinnerfdd3cce2013-09-02 20:53:00 +1000649 out = kmem_zalloc_large(bmv->bmv_count * sizeof(struct getbmapx), 0);
650 if (!out)
Dave Chinner24513372014-06-25 14:58:08 +1000651 return -ENOMEM;
Dave Chinner68988112013-08-12 20:49:42 +1000652
653 xfs_ilock(ip, XFS_IOLOCK_SHARED);
Darrick J. Wongf86f4032016-10-03 09:11:41 -0700654 switch (whichfork) {
655 case XFS_DATA_FORK:
Christoph Hellwigefa70be2013-12-18 02:14:39 -0800656 if (!(iflags & BMV_IF_DELALLOC) &&
657 (ip->i_delayed_blks || XFS_ISIZE(ip) > ip->i_d.di_size)) {
Dave Chinner24513372014-06-25 14:58:08 +1000658 error = filemap_write_and_wait(VFS_I(ip)->i_mapping);
Dave Chinner68988112013-08-12 20:49:42 +1000659 if (error)
660 goto out_unlock_iolock;
Dave Chinner68988112013-08-12 20:49:42 +1000661
Christoph Hellwigefa70be2013-12-18 02:14:39 -0800662 /*
663 * Even after flushing the inode, there can still be
664 * delalloc blocks on the inode beyond EOF due to
665 * speculative preallocation. These are not removed
666 * until the release function is called or the inode
667 * is inactivated. Hence we cannot assert here that
668 * ip->i_delayed_blks == 0.
669 */
670 }
671
672 lock = xfs_ilock_data_map_shared(ip);
Darrick J. Wongf86f4032016-10-03 09:11:41 -0700673 break;
674 case XFS_COW_FORK:
675 lock = XFS_ILOCK_SHARED;
676 xfs_ilock(ip, lock);
677 break;
678 case XFS_ATTR_FORK:
Christoph Hellwigefa70be2013-12-18 02:14:39 -0800679 lock = xfs_ilock_attr_map_shared(ip);
Darrick J. Wongf86f4032016-10-03 09:11:41 -0700680 break;
Christoph Hellwigefa70be2013-12-18 02:14:39 -0800681 }
Dave Chinner68988112013-08-12 20:49:42 +1000682
683 /*
684 * Don't let nex be bigger than the number of extents
685 * we can have assuming alternating holes and real extents.
686 */
687 if (nex > XFS_IFORK_NEXTENTS(ip, whichfork) * 2 + 1)
688 nex = XFS_IFORK_NEXTENTS(ip, whichfork) * 2 + 1;
689
690 bmapi_flags = xfs_bmapi_aflag(whichfork);
691 if (!(iflags & BMV_IF_PREALLOC))
692 bmapi_flags |= XFS_BMAPI_IGSTATE;
693
694 /*
695 * Allocate enough space to handle "subnex" maps at a time.
696 */
Dave Chinner24513372014-06-25 14:58:08 +1000697 error = -ENOMEM;
Dave Chinner68988112013-08-12 20:49:42 +1000698 subnex = 16;
699 map = kmem_alloc(subnex * sizeof(*map), KM_MAYFAIL | KM_NOFS);
700 if (!map)
701 goto out_unlock_ilock;
702
703 bmv->bmv_entries = 0;
704
705 if (XFS_IFORK_NEXTENTS(ip, whichfork) == 0 &&
706 (whichfork == XFS_ATTR_FORK || !(iflags & BMV_IF_DELALLOC))) {
707 error = 0;
708 goto out_free_map;
709 }
710
Dave Chinner68988112013-08-12 20:49:42 +1000711 do {
Darrick J. Wongc364b6d2017-01-26 09:50:30 -0800712 nmap = (nex> subnex) ? subnex : nex;
Dave Chinner68988112013-08-12 20:49:42 +1000713 error = xfs_bmapi_read(ip, XFS_BB_TO_FSBT(mp, bmv->bmv_offset),
714 XFS_BB_TO_FSB(mp, bmv->bmv_length),
715 map, &nmap, bmapi_flags);
716 if (error)
717 goto out_free_map;
718 ASSERT(nmap <= subnex);
719
Darrick J. Wongc364b6d2017-01-26 09:50:30 -0800720 for (i = 0; i < nmap && bmv->bmv_length &&
721 cur_ext < bmv->bmv_count - 1; i++) {
Dave Chinner68988112013-08-12 20:49:42 +1000722 out[cur_ext].bmv_oflags = 0;
723 if (map[i].br_state == XFS_EXT_UNWRITTEN)
724 out[cur_ext].bmv_oflags |= BMV_OF_PREALLOC;
725 else if (map[i].br_startblock == DELAYSTARTBLOCK)
726 out[cur_ext].bmv_oflags |= BMV_OF_DELALLOC;
727 out[cur_ext].bmv_offset =
728 XFS_FSB_TO_BB(mp, map[i].br_startoff);
729 out[cur_ext].bmv_length =
730 XFS_FSB_TO_BB(mp, map[i].br_blockcount);
731 out[cur_ext].bmv_unused1 = 0;
732 out[cur_ext].bmv_unused2 = 0;
733
734 /*
735 * delayed allocation extents that start beyond EOF can
736 * occur due to speculative EOF allocation when the
737 * delalloc extent is larger than the largest freespace
738 * extent at conversion time. These extents cannot be
739 * converted by data writeback, so can exist here even
740 * if we are not supposed to be finding delalloc
741 * extents.
742 */
743 if (map[i].br_startblock == DELAYSTARTBLOCK &&
Zorro Lang892d2a52017-05-15 08:40:02 -0700744 map[i].br_startoff < XFS_B_TO_FSB(mp, XFS_ISIZE(ip)))
Dave Chinner68988112013-08-12 20:49:42 +1000745 ASSERT((iflags & BMV_IF_DELALLOC) != 0);
746
747 if (map[i].br_startblock == HOLESTARTBLOCK &&
748 whichfork == XFS_ATTR_FORK) {
749 /* came to the end of attribute fork */
750 out[cur_ext].bmv_oflags |= BMV_OF_LAST;
751 goto out_free_map;
752 }
753
Darrick J. Wongf86f4032016-10-03 09:11:41 -0700754 /* Is this a shared block? */
755 error = xfs_getbmap_adjust_shared(ip, whichfork,
756 &map[i], &out[cur_ext], &inject_map);
757 if (error)
758 goto out_free_map;
759
760 if (!xfs_getbmapx_fix_eof_hole(ip, whichfork,
761 &out[cur_ext], prealloced, bmvend,
762 map[i].br_startblock,
763 inject_map.br_startblock != NULLFSBLOCK))
Dave Chinner68988112013-08-12 20:49:42 +1000764 goto out_free_map;
765
766 bmv->bmv_offset =
767 out[cur_ext].bmv_offset +
768 out[cur_ext].bmv_length;
769 bmv->bmv_length =
Darrick J. Wongc8ce5402017-06-16 11:00:05 -0700770 max_t(int64_t, 0, bmvend - bmv->bmv_offset);
Dave Chinner68988112013-08-12 20:49:42 +1000771
772 /*
773 * In case we don't want to return the hole,
774 * don't increase cur_ext so that we can reuse
775 * it in the next loop.
776 */
777 if ((iflags & BMV_IF_NO_HOLES) &&
778 map[i].br_startblock == HOLESTARTBLOCK) {
779 memset(&out[cur_ext], 0, sizeof(out[cur_ext]));
780 continue;
781 }
782
Darrick J. Wongc364b6d2017-01-26 09:50:30 -0800783 /*
784 * In order to report shared extents accurately,
785 * we report each distinct shared/unshared part
786 * of a single bmbt record using multiple bmap
787 * extents. To make that happen, we iterate the
788 * same map array item multiple times, each
789 * time trimming out the subextent that we just
790 * reported.
791 *
792 * Because of this, we must check the out array
793 * index (cur_ext) directly against bmv_count-1
794 * to avoid overflows.
795 */
Darrick J. Wongf86f4032016-10-03 09:11:41 -0700796 if (inject_map.br_startblock != NULLFSBLOCK) {
797 map[i] = inject_map;
798 i--;
Darrick J. Wongc364b6d2017-01-26 09:50:30 -0800799 }
Dave Chinner68988112013-08-12 20:49:42 +1000800 bmv->bmv_entries++;
801 cur_ext++;
802 }
Darrick J. Wongc364b6d2017-01-26 09:50:30 -0800803 } while (nmap && bmv->bmv_length && cur_ext < bmv->bmv_count - 1);
Dave Chinner68988112013-08-12 20:49:42 +1000804
805 out_free_map:
806 kmem_free(map);
807 out_unlock_ilock:
Christoph Hellwig01f4f322013-12-06 12:30:08 -0800808 xfs_iunlock(ip, lock);
Dave Chinner68988112013-08-12 20:49:42 +1000809 out_unlock_iolock:
810 xfs_iunlock(ip, XFS_IOLOCK_SHARED);
811
812 for (i = 0; i < cur_ext; i++) {
Dave Chinner68988112013-08-12 20:49:42 +1000813 /* format results & advance arg */
Eric Sandeen1dbba082017-01-27 23:24:28 -0800814 error = formatter(&arg, &out[i]);
815 if (error)
Dave Chinner68988112013-08-12 20:49:42 +1000816 break;
817 }
818
Dave Chinnerfdd3cce2013-09-02 20:53:00 +1000819 kmem_free(out);
Dave Chinner68988112013-08-12 20:49:42 +1000820 return error;
821}

/*
 * dead simple method of punching delayed allocation blocks from a range in
 * the inode. Walks a block at a time so will be slow, but is only executed in
 * rare error cases so the overhead is not critical. This will always punch out
 * both the start and end blocks, even if the ranges only partially overlap
 * them, so it is up to the caller to ensure that partial blocks are not
 * passed in.
 */
int
xfs_bmap_punch_delalloc_range(
	struct xfs_inode	*ip,
	xfs_fileoff_t		start_fsb,
	xfs_fileoff_t		length)
{
	xfs_fileoff_t		remaining = length;
	int			error = 0;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

	do {
		int		done;
		xfs_bmbt_irec_t	imap;
		int		nimaps = 1;
		xfs_fsblock_t	firstblock;
		struct xfs_defer_ops dfops;

		/*
		 * Map the range first and check that it is a delalloc extent
		 * before trying to unmap the range. Otherwise we will be
		 * trying to remove a real extent (which requires a
		 * transaction) or a hole, which is probably a bad idea...
		 */
		error = xfs_bmapi_read(ip, start_fsb, 1, &imap, &nimaps,
				       XFS_BMAPI_ENTIRE);

		if (error) {
			/* something screwed, just bail */
			if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
				xfs_alert(ip->i_mount,
			"Failed delalloc mapping lookup ino %lld fsb %lld.",
						ip->i_ino, start_fsb);
			}
			break;
		}
		if (!nimaps) {
			/* nothing there */
			goto next_block;
		}
		if (imap.br_startblock != DELAYSTARTBLOCK) {
			/* been converted, ignore */
			goto next_block;
		}
		WARN_ON(imap.br_blockcount == 0);

		/*
		 * Note: while we initialise the firstblock/dfops pair, they
		 * should never be used because blocks should never be
		 * allocated or freed for a delalloc extent and hence we don't
		 * need to cancel or finish them after the xfs_bunmapi() call.
		 */
		xfs_defer_init(&dfops, &firstblock);
		error = xfs_bunmapi(NULL, ip, start_fsb, 1, 0, 1, &firstblock,
					&dfops, &done);
		if (error)
			break;

		ASSERT(!xfs_defer_has_unfinished_work(&dfops));
next_block:
		start_fsb++;
		remaining--;
	} while(remaining > 0);

	return error;
}

/*
 * Test whether it is appropriate to check an inode for and free post EOF
 * blocks. The 'force' parameter determines whether we should also consider
 * regular files that are marked preallocated or append-only.
 */
bool
xfs_can_free_eofblocks(struct xfs_inode *ip, bool force)
{
	/* prealloc/delalloc exists only on regular files */
	if (!S_ISREG(VFS_I(ip)->i_mode))
		return false;

	/*
	 * Zero sized files with no cached pages and delalloc blocks will not
	 * have speculative prealloc/delalloc blocks to remove.
	 */
	if (VFS_I(ip)->i_size == 0 &&
	    VFS_I(ip)->i_mapping->nrpages == 0 &&
	    ip->i_delayed_blks == 0)
		return false;

	/* If we haven't read in the extent list, then don't do it now. */
	if (!(ip->i_df.if_flags & XFS_IFEXTENTS))
		return false;

	/*
	 * Do not free real preallocated or append-only files unless the file
	 * has delalloc blocks and we are forced to remove them.
	 */
	if (ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND))
		if (!force || ip->i_delayed_blks == 0)
			return false;

	return true;
}

/*
 * This is called to free any blocks beyond eof. The caller must hold
 * IOLOCK_EXCL unless we are in the inode reclaim path and have the only
 * reference to the inode.
 */
int
xfs_free_eofblocks(
	struct xfs_inode	*ip)
{
	struct xfs_trans	*tp;
	int			error;
	xfs_fileoff_t		end_fsb;
	xfs_fileoff_t		last_fsb;
	xfs_filblks_t		map_len;
	int			nimaps;
	struct xfs_bmbt_irec	imap;
	struct xfs_mount	*mp = ip->i_mount;

	/*
	 * Figure out if there are any blocks beyond the end
	 * of the file. If not, then there is nothing to do.
	 */
	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_ISIZE(ip));
	last_fsb = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
	if (last_fsb <= end_fsb)
		return 0;
	map_len = last_fsb - end_fsb;

	nimaps = 1;
	xfs_ilock(ip, XFS_ILOCK_SHARED);
	error = xfs_bmapi_read(ip, end_fsb, map_len, &imap, &nimaps, 0);
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	/*
	 * If there are blocks after the end of file, truncate the file to its
	 * current size to free them up.
	 */
	if (!error && (nimaps != 0) &&
	    (imap.br_startblock != HOLESTARTBLOCK ||
	     ip->i_delayed_blks)) {
		/*
		 * Attach the dquots to the inode up front.
		 */
		error = xfs_qm_dqattach(ip, 0);
		if (error)
			return error;

		/* wait on dio to ensure i_size has settled */
		inode_dio_wait(VFS_I(ip));

		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0,
				&tp);
		if (error) {
			ASSERT(XFS_FORCED_SHUTDOWN(mp));
			return error;
		}

		xfs_ilock(ip, XFS_ILOCK_EXCL);
		xfs_trans_ijoin(tp, ip, 0);

		/*
		 * Do not update the on-disk file size.  If we update the
		 * on-disk file size and then the system crashes before the
		 * contents of the file are flushed to disk then the files
		 * may be full of holes (ie NULL files bug).
		 */
		error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK,
					XFS_ISIZE(ip));
		if (error) {
			/*
			 * If we get an error at this point we simply don't
			 * bother truncating the file.
			 */
			xfs_trans_cancel(tp);
		} else {
			error = xfs_trans_commit(tp);
			if (!error)
				xfs_inode_clear_eofblocks_tag(ip);
		}

		xfs_iunlock(ip, XFS_ILOCK_EXCL);
	}
	return error;
}

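/*
 * Preallocate file space from @offset for @len bytes.  Space is allocated in
 * chunks, one transaction at a time with quota reserved for each chunk, until
 * the whole range is mapped or an error (e.g. ENOSPC) occurs.
 */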
int
xfs_alloc_file_space(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_off_t		len,
	int			alloc_type)
{
	xfs_mount_t		*mp = ip->i_mount;
	xfs_off_t		count;
	xfs_filblks_t		allocated_fsb;
	xfs_filblks_t		allocatesize_fsb;
	xfs_extlen_t		extsz, temp;
	xfs_fileoff_t		startoffset_fsb;
	xfs_fsblock_t		firstfsb;
	int			nimaps;
	int			quota_flag;
	int			rt;
	xfs_trans_t		*tp;
	xfs_bmbt_irec_t		imaps[1], *imapp;
	struct xfs_defer_ops	dfops;
	uint			qblocks, resblks, resrtextents;
	int			error;

	trace_xfs_alloc_file_space(ip);

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	error = xfs_qm_dqattach(ip, 0);
	if (error)
		return error;

	if (len <= 0)
		return -EINVAL;

	rt = XFS_IS_REALTIME_INODE(ip);
	extsz = xfs_get_extsz_hint(ip);

	count = len;
	imapp = &imaps[0];
	nimaps = 1;
	startoffset_fsb = XFS_B_TO_FSBT(mp, offset);
	allocatesize_fsb = XFS_B_TO_FSB(mp, count);

	/*
	 * Allocate file space until done or until there is an error
	 */
	while (allocatesize_fsb && !error) {
		xfs_fileoff_t	s, e;

		/*
		 * Determine space reservations for data/realtime.
		 */
		if (unlikely(extsz)) {
			s = startoffset_fsb;
			do_div(s, extsz);
			s *= extsz;
			e = startoffset_fsb + allocatesize_fsb;
			if ((temp = do_mod(startoffset_fsb, extsz)))
				e += temp;
			if ((temp = do_mod(e, extsz)))
				e += extsz - temp;
		} else {
			s = 0;
			e = allocatesize_fsb;
		}

		/*
		 * The transaction reservation is limited to a 32-bit block
		 * count, hence we need to limit the number of blocks we are
		 * trying to reserve to avoid an overflow. We can't allocate
		 * more than @nimaps extents, and an extent is limited on disk
		 * to MAXEXTLEN (21 bits), so use that to enforce the limit.
		 */
		resblks = min_t(xfs_fileoff_t, (e - s), (MAXEXTLEN * nimaps));
		if (unlikely(rt)) {
			resrtextents = qblocks = resblks;
			resrtextents /= mp->m_sb.sb_rextsize;
			resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
			quota_flag = XFS_QMOPT_RES_RTBLKS;
		} else {
			resrtextents = 0;
			resblks = qblocks = XFS_DIOSTRAT_SPACE_RES(mp, resblks);
			quota_flag = XFS_QMOPT_RES_REGBLKS;
		}

		/*
		 * Allocate and setup the transaction.
		 */
		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks,
				resrtextents, 0, &tp);

		/*
		 * Check for running out of space
		 */
		if (error) {
			/*
			 * Free the transaction structure.
			 */
			ASSERT(error == -ENOSPC || XFS_FORCED_SHUTDOWN(mp));
			break;
		}
		xfs_ilock(ip, XFS_ILOCK_EXCL);
		error = xfs_trans_reserve_quota_nblks(tp, ip, qblocks,
						      0, quota_flag);
		if (error)
			goto error1;

		xfs_trans_ijoin(tp, ip, 0);

		xfs_defer_init(&dfops, &firstfsb);
		error = xfs_bmapi_write(tp, ip, startoffset_fsb,
					allocatesize_fsb, alloc_type, &firstfsb,
					resblks, imapp, &nimaps, &dfops);
		if (error)
			goto error0;

		/*
		 * Complete the transaction
		 */
		error = xfs_defer_finish(&tp, &dfops);
		if (error)
			goto error0;

		error = xfs_trans_commit(tp);
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		if (error)
			break;

		allocated_fsb = imapp->br_blockcount;

		if (nimaps == 0) {
			error = -ENOSPC;
			break;
		}

		startoffset_fsb += allocated_fsb;
		allocatesize_fsb -= allocated_fsb;
	}

	return error;

error0:	/* Cancel bmap, unlock inode, unreserve quota blocks, cancel trans */
	xfs_defer_cancel(&dfops);
	xfs_trans_unreserve_quota_nblks(tp, ip, (long)qblocks, 0, quota_flag);

error1:	/* Just cancel transaction */
	xfs_trans_cancel(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}

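/*
 * Unmap blocks in the range starting at @startoffset_fsb for @len_fsb blocks
 * in a single transaction, reserving quota for the change.  *done is set by
 * xfs_bunmapi() once the whole range has been unmapped.
 */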
static int
xfs_unmap_extent(
	struct xfs_inode	*ip,
	xfs_fileoff_t		startoffset_fsb,
	xfs_filblks_t		len_fsb,
	int			*done)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	struct xfs_defer_ops	dfops;
	xfs_fsblock_t		firstfsb;
	uint			resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
	int			error;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, 0, &tp);
	if (error) {
		ASSERT(error == -ENOSPC || XFS_FORCED_SHUTDOWN(mp));
		return error;
	}

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	error = xfs_trans_reserve_quota(tp, mp, ip->i_udquot, ip->i_gdquot,
			ip->i_pdquot, resblks, 0, XFS_QMOPT_RES_REGBLKS);
	if (error)
		goto out_trans_cancel;

	xfs_trans_ijoin(tp, ip, 0);

	xfs_defer_init(&dfops, &firstfsb);
	error = xfs_bunmapi(tp, ip, startoffset_fsb, len_fsb, 0, 2, &firstfsb,
			&dfops, done);
	if (error)
		goto out_bmap_cancel;

	xfs_defer_ijoin(&dfops, ip);
	error = xfs_defer_finish(&tp, &dfops);
	if (error)
		goto out_bmap_cancel;

	error = xfs_trans_commit(tp);
out_unlock:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;

out_bmap_cancel:
	xfs_defer_cancel(&dfops);
out_trans_cancel:
	xfs_trans_cancel(tp);
	goto out_unlock;
}

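/*
 * Trim the start and end of an unmap range so that neither end splits a
 * realtime extent: the start is rounded up and the end rounded down based on
 * the realtime extent alignment of the underlying mappings.
 */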
static int
xfs_adjust_extent_unmap_boundaries(
	struct xfs_inode	*ip,
	xfs_fileoff_t		*startoffset_fsb,
	xfs_fileoff_t		*endoffset_fsb)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_bmbt_irec	imap;
	int			nimap, error;
	xfs_extlen_t		mod = 0;

	nimap = 1;
	error = xfs_bmapi_read(ip, *startoffset_fsb, 1, &imap, &nimap, 0);
	if (error)
		return error;

	if (nimap && imap.br_startblock != HOLESTARTBLOCK) {
		ASSERT(imap.br_startblock != DELAYSTARTBLOCK);
		mod = do_mod(imap.br_startblock, mp->m_sb.sb_rextsize);
		if (mod)
			*startoffset_fsb += mp->m_sb.sb_rextsize - mod;
	}

	nimap = 1;
	error = xfs_bmapi_read(ip, *endoffset_fsb - 1, 1, &imap, &nimap, 0);
	if (error)
		return error;

	if (nimap && imap.br_startblock != HOLESTARTBLOCK) {
		ASSERT(imap.br_startblock != DELAYSTARTBLOCK);
		mod++;
		if (mod && mod != mp->m_sb.sb_rextsize)
			*endoffset_fsb -= mod;
	}

	return 0;
}

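/*
 * Wait for pending direct I/O, then write back and invalidate the page cache
 * over the byte range about to be unmapped, rounded out to block size and
 * page size granularity.
 */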
static int
xfs_flush_unmap_range(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_off_t		len)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct inode		*inode = VFS_I(ip);
	xfs_off_t		rounding, start, end;
	int			error;

	/* wait for the completion of any pending DIOs */
	inode_dio_wait(inode);

	rounding = max_t(xfs_off_t, 1 << mp->m_sb.sb_blocklog, PAGE_SIZE);
	start = round_down(offset, rounding);
	end = round_up(offset + len, rounding) - 1;

	error = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (error)
		return error;
	truncate_pagecache_range(inode, start, end);
	return 0;
}

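/*
 * Punch out (unmap and free) the blocks backing the byte range @offset to
 * @offset + @len.  The page cache over the range is flushed and invalidated
 * first, whole blocks are unmapped, and any remaining partial blocks within
 * EOF are zeroed.
 */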
int
xfs_free_file_space(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_off_t		len)
{
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		startoffset_fsb;
	xfs_fileoff_t		endoffset_fsb;
	int			done = 0, error;

	trace_xfs_free_file_space(ip);

	error = xfs_qm_dqattach(ip, 0);
	if (error)
		return error;

	if (len <= 0)	/* if nothing being freed */
		return 0;

	error = xfs_flush_unmap_range(ip, offset, len);
	if (error)
		return error;

	startoffset_fsb = XFS_B_TO_FSB(mp, offset);
	endoffset_fsb = XFS_B_TO_FSBT(mp, offset + len);

	/*
	 * Need to zero the stuff we're not freeing, on disk.  If it's a RT file
	 * and we can't use unwritten extents then we actually need to ensure
	 * to zero the whole extent, otherwise we just need to take care of
	 * block boundaries, and xfs_bunmapi will handle the rest.
	 */
	if (XFS_IS_REALTIME_INODE(ip) &&
	    !xfs_sb_version_hasextflgbit(&mp->m_sb)) {
		error = xfs_adjust_extent_unmap_boundaries(ip, &startoffset_fsb,
				&endoffset_fsb);
		if (error)
			return error;
	}

	if (endoffset_fsb > startoffset_fsb) {
		while (!done) {
			error = xfs_unmap_extent(ip, startoffset_fsb,
					endoffset_fsb - startoffset_fsb, &done);
			if (error)
				return error;
		}
	}

	/*
	 * Now that we've unmapped all full blocks we'll have to zero out any
	 * partial block at the beginning and/or end.  xfs_zero_range is
	 * smart enough to skip any holes, including those we just created,
	 * but we must take care not to zero beyond EOF and enlarge i_size.
	 */

	if (offset >= XFS_ISIZE(ip))
		return 0;

	if (offset + len > XFS_ISIZE(ip))
		len = XFS_ISIZE(ip) - offset;

	return xfs_zero_range(ip, offset, len, NULL);
}

/*
 * Preallocate and zero a range of a file. This mechanism has the allocation
 * semantics of fallocate and in addition converts data in the range to zeroes.
 */
int
xfs_zero_file_space(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_off_t		len)
{
	struct xfs_mount	*mp = ip->i_mount;
	uint			blksize;
	int			error;

	trace_xfs_zero_file_space(ip);

	blksize = 1 << mp->m_sb.sb_blocklog;

	/*
	 * Punch a hole and prealloc the range. We use hole punch rather than
	 * unwritten extent conversion for two reasons:
	 *
	 * 1.) Hole punch handles partial block zeroing for us.
	 *
	 * 2.) If prealloc returns ENOSPC, the file range is still zero-valued
	 * by virtue of the hole punch.
	 */
	error = xfs_free_file_space(ip, offset, len);
	if (error)
		goto out;

	error = xfs_alloc_file_space(ip, round_down(offset, blksize),
				     round_up(offset + len, blksize) -
				     round_down(offset, blksize),
				     XFS_BMAPI_PREALLOC);
out:
	return error;

}
1390
1391/*
Namjae Jeona904b1c2015-03-25 15:08:56 +11001392 * @next_fsb will keep track of the extent currently undergoing shift.
1393 * @stop_fsb will keep track of the extent at which we have to stop.
1394 * If we are shifting left, we will start with block (offset + len) and
1395 * shift each extent till last extent.
1396 * If we are shifting right, we will start with last extent inside file space
1397 * and continue until we reach the block corresponding to offset.
Namjae Jeone1d8fb82014-02-24 10:58:19 +11001398 */
kbuild test robot72c1a732015-04-13 11:25:04 +10001399static int
Namjae Jeona904b1c2015-03-25 15:08:56 +11001400xfs_shift_file_space(
1401 struct xfs_inode *ip,
1402 xfs_off_t offset,
1403 xfs_off_t len,
1404 enum shift_direction direction)
Namjae Jeone1d8fb82014-02-24 10:58:19 +11001405{
1406 int done = 0;
1407 struct xfs_mount *mp = ip->i_mount;
1408 struct xfs_trans *tp;
1409 int error;
Darrick J. Wong2c3234d2016-08-03 11:19:29 +10001410 struct xfs_defer_ops dfops;
Namjae Jeone1d8fb82014-02-24 10:58:19 +11001411 xfs_fsblock_t first_block;
Namjae Jeona904b1c2015-03-25 15:08:56 +11001412 xfs_fileoff_t stop_fsb;
Brian Foster2c845f52014-09-23 15:37:09 +10001413 xfs_fileoff_t next_fsb;
Namjae Jeone1d8fb82014-02-24 10:58:19 +11001414 xfs_fileoff_t shift_fsb;
Brian Foster48af96a2017-02-15 10:18:10 -08001415 uint resblks;
Namjae Jeone1d8fb82014-02-24 10:58:19 +11001416
Namjae Jeona904b1c2015-03-25 15:08:56 +11001417 ASSERT(direction == SHIFT_LEFT || direction == SHIFT_RIGHT);
Namjae Jeone1d8fb82014-02-24 10:58:19 +11001418
Namjae Jeona904b1c2015-03-25 15:08:56 +11001419 if (direction == SHIFT_LEFT) {
Brian Foster48af96a2017-02-15 10:18:10 -08001420 /*
1421 * Reserve blocks to cover potential extent merges after left
1422 * shift operations.
1423 */
1424 resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
Namjae Jeona904b1c2015-03-25 15:08:56 +11001425 next_fsb = XFS_B_TO_FSB(mp, offset + len);
1426 stop_fsb = XFS_B_TO_FSB(mp, VFS_I(ip)->i_size);
1427 } else {
1428 /*
1429 * If right shift, delegate the work of initialization of
1430 * next_fsb to xfs_bmap_shift_extent as it has ilock held.
1431 */
Brian Foster48af96a2017-02-15 10:18:10 -08001432 resblks = 0;
Namjae Jeona904b1c2015-03-25 15:08:56 +11001433 next_fsb = NULLFSBLOCK;
1434 stop_fsb = XFS_B_TO_FSB(mp, offset);
1435 }
Namjae Jeone1d8fb82014-02-24 10:58:19 +11001436
Namjae Jeone1d8fb82014-02-24 10:58:19 +11001437 shift_fsb = XFS_B_TO_FSB(mp, len);
1438
Brian Fosterf71721d2014-09-23 15:39:05 +10001439 /*
1440 * Trim eofblocks to avoid shifting uninitialized post-eof preallocation
1441 * into the accessible region of the file.
1442 */
Brian Foster41b9d722014-09-02 12:12:53 +10001443 if (xfs_can_free_eofblocks(ip, true)) {
Brian Fostera36b9262017-01-27 23:22:55 -08001444 error = xfs_free_eofblocks(ip);
Brian Foster41b9d722014-09-02 12:12:53 +10001445 if (error)
1446 return error;
1447 }
Dave Chinner1669a8c2014-09-02 12:12:53 +10001448
Brian Fosterf71721d2014-09-23 15:39:05 +10001449 /*
1450 * Write back and invalidate the page cache for the remainder of the file
Namjae Jeona904b1c2015-03-25 15:08:56 +11001451 * as we're about to shift every extent from offset to EOF.
Brian Fosterf71721d2014-09-23 15:39:05 +10001452 */
1453 error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping,
Namjae Jeona904b1c2015-03-25 15:08:56 +11001454 offset, -1);
Brian Fosterf71721d2014-09-23 15:39:05 +10001455 if (error)
1456 return error;
1457 error = invalidate_inode_pages2_range(VFS_I(ip)->i_mapping,
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001458 offset >> PAGE_SHIFT, -1);
Namjae Jeone1d8fb82014-02-24 10:58:19 +11001459 if (error)
1460 return error;
1461
Namjae Jeona904b1c2015-03-25 15:08:56 +11001462 /*
1463 * The extent shifting code works on extent granularity, so if
1464 * stop_fsb is not the starting block of an extent, we need to split
1465 * the extent at stop_fsb.
1466 */
1467 if (direction == SHIFT_RIGHT) {
1468 error = xfs_bmap_split_extent(ip, stop_fsb);
1469 if (error)
1470 return error;
1471 }
1472
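	/*
	 * Shift the extents in chunks: each pass allocates a transaction,
	 * shifts up to XFS_BMAP_MAX_SHIFT_EXTENTS extents and commits, until
	 * xfs_bmap_shift_extents() reports that it is done.
	 */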
Namjae Jeone1d8fb82014-02-24 10:58:19 +11001473 while (!error && !done) {
Brian Foster48af96a2017-02-15 10:18:10 -08001474 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, 0,
1475 &tp);
Christoph Hellwig253f4912016-04-06 09:19:55 +10001476 if (error)
Namjae Jeone1d8fb82014-02-24 10:58:19 +11001477 break;
Namjae Jeone1d8fb82014-02-24 10:58:19 +11001478
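		/* Take the ilock and reserve quota to match the block reservation. */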
1479 xfs_ilock(ip, XFS_ILOCK_EXCL);
1480 error = xfs_trans_reserve_quota(tp, mp, ip->i_udquot,
Brian Foster48af96a2017-02-15 10:18:10 -08001481 ip->i_gdquot, ip->i_pdquot, resblks, 0,
Namjae Jeone1d8fb82014-02-24 10:58:19 +11001482 XFS_QMOPT_RES_REGBLKS);
1483 if (error)
Brian Fosterd4a97a02015-08-19 10:01:40 +10001484 goto out_trans_cancel;
Namjae Jeone1d8fb82014-02-24 10:58:19 +11001485
Namjae Jeona904b1c2015-03-25 15:08:56 +11001486 xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
Namjae Jeone1d8fb82014-02-24 10:58:19 +11001487
Darrick J. Wong2c3234d2016-08-03 11:19:29 +10001488 xfs_defer_init(&dfops, &first_block);
Namjae Jeone1d8fb82014-02-24 10:58:19 +11001489
1490 /*
1491 * We are using the write transaction, in which a maximum of
1492 * two bmbt updates are allowed.
1493 */
Namjae Jeona904b1c2015-03-25 15:08:56 +11001494 error = xfs_bmap_shift_extents(tp, ip, &next_fsb, shift_fsb,
Darrick J. Wong2c3234d2016-08-03 11:19:29 +10001495 &done, stop_fsb, &first_block, &dfops,
Namjae Jeona904b1c2015-03-25 15:08:56 +11001496 direction, XFS_BMAP_MAX_SHIFT_EXTENTS);
Namjae Jeone1d8fb82014-02-24 10:58:19 +11001497 if (error)
Brian Fosterd4a97a02015-08-19 10:01:40 +10001498 goto out_bmap_cancel;
Namjae Jeone1d8fb82014-02-24 10:58:19 +11001499
Christoph Hellwig8ad7c6292017-08-28 10:21:04 -07001500 error = xfs_defer_finish(&tp, &dfops);
Namjae Jeone1d8fb82014-02-24 10:58:19 +11001501 if (error)
Brian Fosterd4a97a02015-08-19 10:01:40 +10001502 goto out_bmap_cancel;
Namjae Jeone1d8fb82014-02-24 10:58:19 +11001503
Christoph Hellwig70393312015-06-04 13:48:08 +10001504 error = xfs_trans_commit(tp);
Namjae Jeone1d8fb82014-02-24 10:58:19 +11001505 }
1506
1507 return error;
1508
Brian Fosterd4a97a02015-08-19 10:01:40 +10001509out_bmap_cancel:
Darrick J. Wong2c3234d2016-08-03 11:19:29 +10001510 xfs_defer_cancel(&dfops);
Brian Fosterd4a97a02015-08-19 10:01:40 +10001511out_trans_cancel:
Christoph Hellwig4906e212015-06-04 13:47:56 +10001512 xfs_trans_cancel(tp);
Namjae Jeone1d8fb82014-02-24 10:58:19 +11001513 return error;
1514}
1515
1516/*
Namjae Jeona904b1c2015-03-25 15:08:56 +11001517 * xfs_collapse_file_space()
1518 * This routine frees disk space and shifts extents for the given file.
1519 * The first thing we do is free the data blocks in the specified range
1520 * by calling xfs_free_file_space(). This also syncs dirty data
1521 * and invalidates the page cache over the region on which the collapse
1522 * range is working. Extent records are then shifted left to cover the hole.
1523 * RETURNS:
1524 * 0 on success
1525 * errno on error
1526 *
1527 */
1528int
1529xfs_collapse_file_space(
1530 struct xfs_inode *ip,
1531 xfs_off_t offset,
1532 xfs_off_t len)
1533{
1534 int error;
1535
1536 ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
1537 trace_xfs_collapse_file_space(ip);
1538
1539 error = xfs_free_file_space(ip, offset, len);
1540 if (error)
1541 return error;
1542
1543 return xfs_shift_file_space(ip, offset, len, SHIFT_LEFT);
1544}
1545
1546/*
1547 * xfs_insert_file_space()
1548 * This routine creates hole space by shifting extents for the given file.
1549 * The first thing we do is sync dirty data and invalidate the page cache
1550 * over the region on which the insert range is working. We then split an
1551 * extent into two extents at the given offset by calling xfs_bmap_split_extent,
1552 * and shift all extent records lying between [offset,
1553 * last allocated extent] to the right to make room for the hole.
1554 * RETURNS:
1555 * 0 on success
1556 * errno on error
1557 */
1558int
1559xfs_insert_file_space(
1560 struct xfs_inode *ip,
1561 loff_t offset,
1562 loff_t len)
1563{
1564 ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
1565 trace_xfs_insert_file_space(ip);
1566
1567 return xfs_shift_file_space(ip, offset, len, SHIFT_RIGHT);
1568}
1569
1570/*
Dave Chinnera133d952013-08-12 20:49:48 +10001571 * We need to check that the format of the data fork in the temporary inode is
1572 * valid for the target inode before doing the swap. This is not a problem with
1573 * attr1 because of the fixed fork offset, but attr2 has a dynamically sized
1574 * data fork depending on the space the attribute fork is taking, so we can get
1575 * invalid formats on the target inode.
1576 *
1577 * E.g. target has space for 7 extents in extent format, temp inode only has
1578 * space for 6. If we defragment down to 7 extents, then the tmp format is a
1579 * btree, but when swapped it needs to be in extent format. Hence we can't just
1580 * blindly swap data forks on attr2 filesystems.
1581 *
1582 * Note that we check the swap in both directions so that we don't end up with
1583 * a corrupt temporary inode, either.
1584 *
1585 * Note that fixing the way xfs_fsr sets up the attribute fork in the source
1586 * inode will prevent this situation from occurring, so all we do here is
1587 * reject and log the attempt. Basically we are putting the responsibility on
1588 * userspace to get this right.
1589 */
1590static int
1591xfs_swap_extents_check_format(
Darrick J. Wonge06259a2016-10-03 09:11:52 -07001592 struct xfs_inode *ip, /* target inode */
1593 struct xfs_inode *tip) /* tmp inode */
Dave Chinnera133d952013-08-12 20:49:48 +10001594{
1595
1596 /* Should never get a local format */
1597 if (ip->i_d.di_format == XFS_DINODE_FMT_LOCAL ||
1598 tip->i_d.di_format == XFS_DINODE_FMT_LOCAL)
Dave Chinner24513372014-06-25 14:58:08 +10001599 return -EINVAL;
Dave Chinnera133d952013-08-12 20:49:48 +10001600
1601 /*
1602 * If the target inode has fewer extents than the temporary inode, then
1603 * why did userspace call us?
1604 */
1605 if (ip->i_d.di_nextents < tip->i_d.di_nextents)
Dave Chinner24513372014-06-25 14:58:08 +10001606 return -EINVAL;
Dave Chinnera133d952013-08-12 20:49:48 +10001607
1608 /*
Darrick J. Wong1f08af52016-10-03 09:11:53 -07001609 * If we have to use the (expensive) rmap swap method, we can
1610 * handle any number of extents and any format.
1611 */
1612 if (xfs_sb_version_hasrmapbt(&ip->i_mount->m_sb))
1613 return 0;
1614
1615 /*
Dave Chinnera133d952013-08-12 20:49:48 +10001616 * If the target inode is in extent form and the temp inode is in btree
1617 * form then we will end up with the target inode in the wrong format,
1618 * as we already know there are fewer extents in the temp inode.
1619 */
1620 if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
1621 tip->i_d.di_format == XFS_DINODE_FMT_BTREE)
Dave Chinner24513372014-06-25 14:58:08 +10001622 return -EINVAL;
Dave Chinnera133d952013-08-12 20:49:48 +10001623
1624 /* Check temp in extent form to max in target */
1625 if (tip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
1626 XFS_IFORK_NEXTENTS(tip, XFS_DATA_FORK) >
1627 XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK))
Dave Chinner24513372014-06-25 14:58:08 +10001628 return -EINVAL;
Dave Chinnera133d952013-08-12 20:49:48 +10001629
1630 /* Check target in extent form to max in temp */
1631 if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
1632 XFS_IFORK_NEXTENTS(ip, XFS_DATA_FORK) >
1633 XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK))
Dave Chinner24513372014-06-25 14:58:08 +10001634 return -EINVAL;
Dave Chinnera133d952013-08-12 20:49:48 +10001635
1636 /*
1637 * If we are in a btree format, check that the temp root block will fit
1638 * in the target and that it has enough extents to be in btree format
1639 * in the target.
1640 *
1641 * Note that we have to be careful to allow btree->extent conversions
1642 * (a common defrag case) which will occur when the temp inode is in
1643 * extent format...
1644 */
1645 if (tip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
Arnd Bergmann0cbe48c2017-06-14 21:35:34 -07001646 if (XFS_IFORK_Q(ip) &&
Dave Chinnera133d952013-08-12 20:49:48 +10001647 XFS_BMAP_BMDR_SPACE(tip->i_df.if_broot) > XFS_IFORK_BOFF(ip))
Dave Chinner24513372014-06-25 14:58:08 +10001648 return -EINVAL;
Dave Chinnera133d952013-08-12 20:49:48 +10001649 if (XFS_IFORK_NEXTENTS(tip, XFS_DATA_FORK) <=
1650 XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK))
Dave Chinner24513372014-06-25 14:58:08 +10001651 return -EINVAL;
Dave Chinnera133d952013-08-12 20:49:48 +10001652 }
1653
1654 /* Reciprocal target->temp btree format checks */
1655 if (ip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
Arnd Bergmann0cbe48c2017-06-14 21:35:34 -07001656 if (XFS_IFORK_Q(tip) &&
Dave Chinnera133d952013-08-12 20:49:48 +10001657 XFS_BMAP_BMDR_SPACE(ip->i_df.if_broot) > XFS_IFORK_BOFF(tip))
Dave Chinner24513372014-06-25 14:58:08 +10001658 return -EINVAL;
Dave Chinnera133d952013-08-12 20:49:48 +10001659 if (XFS_IFORK_NEXTENTS(ip, XFS_DATA_FORK) <=
1660 XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK))
Dave Chinner24513372014-06-25 14:58:08 +10001661 return -EINVAL;
Dave Chinnera133d952013-08-12 20:49:48 +10001662 }
1663
1664 return 0;
1665}
1666
Dave Chinner7abbb8f2014-09-23 16:20:11 +10001667static int
Dave Chinner4ef897a2014-08-04 13:44:08 +10001668xfs_swap_extent_flush(
1669 struct xfs_inode *ip)
1670{
1671 int error;
1672
1673 error = filemap_write_and_wait(VFS_I(ip)->i_mapping);
1674 if (error)
1675 return error;
1676 truncate_pagecache_range(VFS_I(ip), 0, -1);
1677
1678 /* Verify O_DIRECT for ftmp */
1679 if (VFS_I(ip)->i_mapping->nrpages)
1680 return -EINVAL;
Dave Chinner4ef897a2014-08-04 13:44:08 +10001681 return 0;
1682}
1683
Darrick J. Wong1f08af52016-10-03 09:11:53 -07001684/*
1685 * Move extents from one file to another, when rmap is enabled.
1686 */
1687STATIC int
1688xfs_swap_extent_rmap(
1689 struct xfs_trans **tpp,
1690 struct xfs_inode *ip,
1691 struct xfs_inode *tip)
1692{
1693 struct xfs_bmbt_irec irec;
1694 struct xfs_bmbt_irec uirec;
1695 struct xfs_bmbt_irec tirec;
1696 xfs_fileoff_t offset_fsb;
1697 xfs_fileoff_t end_fsb;
1698 xfs_filblks_t count_fsb;
1699 xfs_fsblock_t firstfsb;
1700 struct xfs_defer_ops dfops;
1701 int error;
1702 xfs_filblks_t ilen;
1703 xfs_filblks_t rlen;
1704 int nimaps;
Darrick J. Wongc8ce5402017-06-16 11:00:05 -07001705 uint64_t tip_flags2;
Darrick J. Wong1f08af52016-10-03 09:11:53 -07001706
1707 /*
1708 * If the source file has shared blocks, we must flag the donor
1709 * file as having shared blocks so that we get the shared-block
1710 * rmap functions when we go to fix up the rmaps. The flags
1711 * will be switched for real later.
1712 */
1713 tip_flags2 = tip->i_d.di_flags2;
1714 if (ip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK)
1715 tip->i_d.di_flags2 |= XFS_DIFLAG2_REFLINK;
1716
1717 offset_fsb = 0;
1718 end_fsb = XFS_B_TO_FSB(ip->i_mount, i_size_read(VFS_I(ip)));
1719 count_fsb = (xfs_filblks_t)(end_fsb - offset_fsb);
1720
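	/*
	 * Walk every mapping in the donor file and exchange it with the
	 * corresponding range of the source file, one extent at a time.
	 */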
1721 while (count_fsb) {
1722 /* Read extent from the donor file */
1723 nimaps = 1;
1724 error = xfs_bmapi_read(tip, offset_fsb, count_fsb, &tirec,
1725 &nimaps, 0);
1726 if (error)
1727 goto out;
1728 ASSERT(nimaps == 1);
1729 ASSERT(tirec.br_startblock != DELAYSTARTBLOCK);
1730
1731 trace_xfs_swap_extent_rmap_remap(tip, &tirec);
1732 ilen = tirec.br_blockcount;
1733
1734 /* Unmap the old blocks in the source file. */
1735 while (tirec.br_blockcount) {
1736 xfs_defer_init(&dfops, &firstfsb);
1737 trace_xfs_swap_extent_rmap_remap_piece(tip, &tirec);
1738
1739 /* Read extent from the source file */
1740 nimaps = 1;
1741 error = xfs_bmapi_read(ip, tirec.br_startoff,
1742 tirec.br_blockcount, &irec,
1743 &nimaps, 0);
1744 if (error)
1745 goto out_defer;
1746 ASSERT(nimaps == 1);
1747 ASSERT(tirec.br_startoff == irec.br_startoff);
1748 trace_xfs_swap_extent_rmap_remap_piece(ip, &irec);
1749
1750 /* Trim the extent. */
1751 uirec = tirec;
1752 uirec.br_blockcount = rlen = min_t(xfs_filblks_t,
1753 tirec.br_blockcount,
1754 irec.br_blockcount);
1755 trace_xfs_swap_extent_rmap_remap_piece(tip, &uirec);
1756
1757 /* Remove the mapping from the donor file. */
1758 error = xfs_bmap_unmap_extent((*tpp)->t_mountp, &dfops,
1759 tip, &uirec);
1760 if (error)
1761 goto out_defer;
1762
1763 /* Remove the mapping from the source file. */
1764 error = xfs_bmap_unmap_extent((*tpp)->t_mountp, &dfops,
1765 ip, &irec);
1766 if (error)
1767 goto out_defer;
1768
1769 /* Map the donor file's blocks into the source file. */
1770 error = xfs_bmap_map_extent((*tpp)->t_mountp, &dfops,
1771 ip, &uirec);
1772 if (error)
1773 goto out_defer;
1774
1775 /* Map the source file's blocks into the donor file. */
1776 error = xfs_bmap_map_extent((*tpp)->t_mountp, &dfops,
1777 tip, &irec);
1778 if (error)
1779 goto out_defer;
1780
Christoph Hellwig8ad7c6292017-08-28 10:21:04 -07001781 xfs_defer_ijoin(&dfops, ip);
1782 error = xfs_defer_finish(tpp, &dfops);
Darrick J. Wong1f08af52016-10-03 09:11:53 -07001783 if (error)
1784 goto out_defer;
1785
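			/* Advance the donor mapping past the piece just swapped. */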
1786 tirec.br_startoff += rlen;
1787 if (tirec.br_startblock != HOLESTARTBLOCK &&
1788 tirec.br_startblock != DELAYSTARTBLOCK)
1789 tirec.br_startblock += rlen;
1790 tirec.br_blockcount -= rlen;
1791 }
1792
1793 /* Roll on... */
1794 count_fsb -= ilen;
1795 offset_fsb += ilen;
1796 }
1797
1798 tip->i_d.di_flags2 = tip_flags2;
1799 return 0;
1800
1801out_defer:
1802 xfs_defer_cancel(&dfops);
1803out:
1804 trace_xfs_swap_extent_rmap_error(ip, error, _RET_IP_);
1805 tip->i_d.di_flags2 = tip_flags2;
1806 return error;
1807}
1808
Darrick J. Wong39aff5f2016-10-03 09:11:53 -07001809/* Swap the extents of two files by swapping data forks. */
1810STATIC int
1811xfs_swap_extent_forks(
1812 struct xfs_trans *tp,
1813 struct xfs_inode *ip,
1814 struct xfs_inode *tip,
1815 int *src_log_flags,
1816 int *target_log_flags)
1817{
1818 struct xfs_ifork tempifp, *ifp, *tifp;
Darrick J. Wonge7f5d5c2017-06-16 11:00:12 -07001819 xfs_filblks_t aforkblks = 0;
1820 xfs_filblks_t taforkblks = 0;
1821 xfs_extnum_t junk;
Eric Sandeen4dfce572016-11-08 12:55:18 +11001822 xfs_extnum_t nextents;
Darrick J. Wongc8ce5402017-06-16 11:00:05 -07001823 uint64_t tmp;
Darrick J. Wong39aff5f2016-10-03 09:11:53 -07001824 int error;
1825
1826 /*
1827 * Count the number of extended attribute blocks
1828 */
1829 if ( ((XFS_IFORK_Q(ip) != 0) && (ip->i_d.di_anextents > 0)) &&
1830 (ip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)) {
Darrick J. Wonge7f5d5c2017-06-16 11:00:12 -07001831 error = xfs_bmap_count_blocks(tp, ip, XFS_ATTR_FORK, &junk,
Darrick J. Wong39aff5f2016-10-03 09:11:53 -07001832 &aforkblks);
1833 if (error)
1834 return error;
1835 }
1836 if ( ((XFS_IFORK_Q(tip) != 0) && (tip->i_d.di_anextents > 0)) &&
1837 (tip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)) {
Darrick J. Wonge7f5d5c2017-06-16 11:00:12 -07001838 error = xfs_bmap_count_blocks(tp, tip, XFS_ATTR_FORK, &junk,
Darrick J. Wong39aff5f2016-10-03 09:11:53 -07001839 &taforkblks);
1840 if (error)
1841 return error;
1842 }
1843
1844 /*
Brian Foster6fb10d62017-08-29 10:08:39 -07001845 * Btree format (v3) inodes have the inode number stamped in the bmbt
1846 * block headers. We can't start changing the bmbt blocks until the
1847 * inode owner change is logged so recovery does the right thing in the
1848 * event of a crash. Set the owner change log flags now and leave the
1849 * bmbt scan as the last step.
Darrick J. Wong39aff5f2016-10-03 09:11:53 -07001850 */
1851 if (ip->i_d.di_version == 3 &&
Brian Foster6fb10d62017-08-29 10:08:39 -07001852 ip->i_d.di_format == XFS_DINODE_FMT_BTREE)
Darrick J. Wong39aff5f2016-10-03 09:11:53 -07001853 (*target_log_flags) |= XFS_ILOG_DOWNER;
Darrick J. Wong39aff5f2016-10-03 09:11:53 -07001854 if (tip->i_d.di_version == 3 &&
Brian Foster6fb10d62017-08-29 10:08:39 -07001855 tip->i_d.di_format == XFS_DINODE_FMT_BTREE)
Darrick J. Wong39aff5f2016-10-03 09:11:53 -07001856 (*src_log_flags) |= XFS_ILOG_DOWNER;
Darrick J. Wong39aff5f2016-10-03 09:11:53 -07001857
1858 /*
1859 * Swap the data forks of the inodes
1860 */
1861 ifp = &ip->i_df;
1862 tifp = &tip->i_df;
1863 tempifp = *ifp; /* struct copy */
1864 *ifp = *tifp; /* struct copy */
1865 *tifp = tempifp; /* struct copy */
1866
1867 /*
1868 * Fix the on-disk inode values
1869 */
Darrick J. Wongc8ce5402017-06-16 11:00:05 -07001870 tmp = (uint64_t)ip->i_d.di_nblocks;
Darrick J. Wong39aff5f2016-10-03 09:11:53 -07001871 ip->i_d.di_nblocks = tip->i_d.di_nblocks - taforkblks + aforkblks;
1872 tip->i_d.di_nblocks = tmp + taforkblks - aforkblks;
1873
Darrick J. Wongc8ce5402017-06-16 11:00:05 -07001874 tmp = (uint64_t) ip->i_d.di_nextents;
Darrick J. Wong39aff5f2016-10-03 09:11:53 -07001875 ip->i_d.di_nextents = tip->i_d.di_nextents;
1876 tip->i_d.di_nextents = tmp;
1877
Darrick J. Wongc8ce5402017-06-16 11:00:05 -07001878 tmp = (uint64_t) ip->i_d.di_format;
Darrick J. Wong39aff5f2016-10-03 09:11:53 -07001879 ip->i_d.di_format = tip->i_d.di_format;
1880 tip->i_d.di_format = tmp;
1881
1882 /*
1883 * The extents in the source inode could still contain speculative
1884 * preallocation beyond EOF (e.g. the file is open but not modified
1885 * while defrag is in progress). In that case, we need to copy over the
1886 * number of delalloc blocks the data fork in the source inode is
1887 * tracking beyond EOF so that when the fork is truncated away when the
1888 * temporary inode is unlinked we don't underrun the i_delayed_blks
1889 * counter on that inode.
1890 */
1891 ASSERT(tip->i_delayed_blks == 0);
1892 tip->i_delayed_blks = ip->i_delayed_blks;
1893 ip->i_delayed_blks = 0;
1894
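	/*
	 * Fix up the incore extent pointers and set the per-fork log flags
	 * to match the new data fork format of each inode.
	 */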
1895 switch (ip->i_d.di_format) {
1896 case XFS_DINODE_FMT_EXTENTS:
Eric Sandeen5d829302016-11-08 12:59:42 +11001897 /*
1898 * If the extents fit in the inode, fix the pointer. Otherwise
1899 * it's already NULL or pointing to the extent.
Darrick J. Wong39aff5f2016-10-03 09:11:53 -07001900 */
Eric Sandeen5d829302016-11-08 12:59:42 +11001901 nextents = xfs_iext_count(&ip->i_df);
1902 if (nextents <= XFS_INLINE_EXTS)
1903 ifp->if_u1.if_extents = ifp->if_u2.if_inline_ext;
Darrick J. Wong39aff5f2016-10-03 09:11:53 -07001904 (*src_log_flags) |= XFS_ILOG_DEXT;
1905 break;
1906 case XFS_DINODE_FMT_BTREE:
1907 ASSERT(ip->i_d.di_version < 3 ||
1908 (*src_log_flags & XFS_ILOG_DOWNER));
1909 (*src_log_flags) |= XFS_ILOG_DBROOT;
1910 break;
1911 }
1912
1913 switch (tip->i_d.di_format) {
1914 case XFS_DINODE_FMT_EXTENTS:
Eric Sandeen5d829302016-11-08 12:59:42 +11001915 /*
1916 * If the extents fit in the inode, fix the pointer. Otherwise
1917 * it's already NULL or pointing to the extent.
Darrick J. Wong39aff5f2016-10-03 09:11:53 -07001918 */
Eric Sandeen5d829302016-11-08 12:59:42 +11001919 nextents = xfs_iext_count(&tip->i_df);
1920 if (nextents <= XFS_INLINE_EXTS)
1921 tifp->if_u1.if_extents = tifp->if_u2.if_inline_ext;
Darrick J. Wong39aff5f2016-10-03 09:11:53 -07001922 (*target_log_flags) |= XFS_ILOG_DEXT;
1923 break;
1924 case XFS_DINODE_FMT_BTREE:
1925 (*target_log_flags) |= XFS_ILOG_DBROOT;
1926 ASSERT(tip->i_d.di_version < 3 ||
1927 (*target_log_flags & XFS_ILOG_DOWNER));
1928 break;
1929 }
1930
1931 return 0;
1932}
1933
Brian Foster2dd3d702017-08-29 10:08:40 -07001934/*
1935 * Fix up the owners of the bmbt blocks to refer to the current inode. The
1936 * change owner scan attempts to order all modified buffers in the current
1937 * transaction. In the event of ordered buffer failure, the offending buffer is
1938 * physically logged as a fallback and the scan returns -EAGAIN. We must roll
1939 * the transaction in this case to replenish the fallback log reservation and
1940 * restart the scan. This process repeats until the scan completes.
1941 */
1942static int
1943xfs_swap_change_owner(
1944 struct xfs_trans **tpp,
1945 struct xfs_inode *ip,
1946 struct xfs_inode *tmpip)
1947{
1948 int error;
1949 struct xfs_trans *tp = *tpp;
1950
1951 do {
1952 error = xfs_bmbt_change_owner(tp, ip, XFS_DATA_FORK, ip->i_ino,
1953 NULL);
1954 /* success or fatal error */
1955 if (error != -EAGAIN)
1956 break;
1957
1958 error = xfs_trans_roll(tpp);
1959 if (error)
1960 break;
1961 tp = *tpp;
1962
1963 /*
1964 * Redirty both inodes so they can relog and keep the log tail
1965 * moving forward.
1966 */
1967 xfs_trans_ijoin(tp, ip, 0);
1968 xfs_trans_ijoin(tp, tmpip, 0);
1969 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
1970 xfs_trans_log_inode(tp, tmpip, XFS_ILOG_CORE);
1971 } while (true);
1972
1973 return error;
1974}
1975
Dave Chinner4ef897a2014-08-04 13:44:08 +10001976int
Dave Chinnera133d952013-08-12 20:49:48 +10001977xfs_swap_extents(
Darrick J. Wonge06259a2016-10-03 09:11:52 -07001978 struct xfs_inode *ip, /* target inode */
1979 struct xfs_inode *tip, /* tmp inode */
1980 struct xfs_swapext *sxp)
Dave Chinnera133d952013-08-12 20:49:48 +10001981{
Darrick J. Wonge06259a2016-10-03 09:11:52 -07001982 struct xfs_mount *mp = ip->i_mount;
1983 struct xfs_trans *tp;
1984 struct xfs_bstat *sbp = &sxp->sx_stat;
Darrick J. Wonge06259a2016-10-03 09:11:52 -07001985 int src_log_flags, target_log_flags;
1986 int error = 0;
Darrick J. Wonge06259a2016-10-03 09:11:52 -07001987 int lock_flags;
Darrick J. Wongf0bc4d12016-10-03 09:11:42 -07001988 struct xfs_ifork *cowfp;
Darrick J. Wongc8ce5402017-06-16 11:00:05 -07001989 uint64_t f;
Brian Foster2dd3d702017-08-29 10:08:40 -07001990 int resblks = 0;
Dave Chinnera133d952013-08-12 20:49:48 +10001991
Dave Chinnera133d952013-08-12 20:49:48 +10001992 /*
Dave Chinner723cac42015-02-23 21:47:29 +11001993 * Lock the inodes against other IO, page faults and truncate to
1994 * begin with. Then we can safely ensure the inodes are flushed and
1995 * have no page cache. Once we have done this we can take the ilocks and
1996 * do the rest of the checks.
Dave Chinnera133d952013-08-12 20:49:48 +10001997 */
Christoph Hellwig65523212016-11-30 14:33:25 +11001998 lock_two_nondirectories(VFS_I(ip), VFS_I(tip));
1999 lock_flags = XFS_MMAPLOCK_EXCL;
Dave Chinner723cac42015-02-23 21:47:29 +11002000 xfs_lock_two_inodes(ip, tip, XFS_MMAPLOCK_EXCL);
Dave Chinnera133d952013-08-12 20:49:48 +10002001
2002 /* Verify that both files have the same format */
Dave Chinnerc19b3b052016-02-09 16:54:58 +11002003 if ((VFS_I(ip)->i_mode & S_IFMT) != (VFS_I(tip)->i_mode & S_IFMT)) {
Dave Chinner24513372014-06-25 14:58:08 +10002004 error = -EINVAL;
Dave Chinnera133d952013-08-12 20:49:48 +10002005 goto out_unlock;
2006 }
2007
2008 /* Verify both files are either real-time or non-realtime */
2009 if (XFS_IS_REALTIME_INODE(ip) != XFS_IS_REALTIME_INODE(tip)) {
Dave Chinner24513372014-06-25 14:58:08 +10002010 error = -EINVAL;
Dave Chinnera133d952013-08-12 20:49:48 +10002011 goto out_unlock;
2012 }
2013
Dave Chinner4ef897a2014-08-04 13:44:08 +10002014 error = xfs_swap_extent_flush(ip);
Dave Chinnera133d952013-08-12 20:49:48 +10002015 if (error)
2016 goto out_unlock;
Dave Chinner4ef897a2014-08-04 13:44:08 +10002017 error = xfs_swap_extent_flush(tip);
2018 if (error)
2019 goto out_unlock;
Dave Chinnera133d952013-08-12 20:49:48 +10002020
Darrick J. Wong1f08af52016-10-03 09:11:53 -07002021 /*
2022 * Extent "swapping" with rmap requires a permanent reservation and
2023 * a block reservation because it's really just a remap operation
2024 * performed with log redo items!
2025 */
2026 if (xfs_sb_version_hasrmapbt(&mp->m_sb)) {
2027 /*
2028 * Conceptually this shouldn't affect the shape of either
2029 * bmbt, but since we atomically move extents one by one,
2030 * we reserve enough space to rebuild both trees.
2031 */
2032 resblks = XFS_SWAP_RMAP_SPACE_RES(mp,
2033 XFS_IFORK_NEXTENTS(ip, XFS_DATA_FORK),
2034 XFS_DATA_FORK) +
2035 XFS_SWAP_RMAP_SPACE_RES(mp,
2036 XFS_IFORK_NEXTENTS(tip, XFS_DATA_FORK),
2037 XFS_DATA_FORK);
Brian Foster2dd3d702017-08-29 10:08:40 -07002038 }
2039 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, 0, &tp);
Christoph Hellwig253f4912016-04-06 09:19:55 +10002040 if (error)
Dave Chinnera133d952013-08-12 20:49:48 +10002041 goto out_unlock;
Dave Chinner723cac42015-02-23 21:47:29 +11002042
2043 /*
2044 * Lock and join the inodes to the transaction so that transaction commit
2045 * or cancel will unlock the inodes from this point onwards.
2046 */
Dave Chinner4ef897a2014-08-04 13:44:08 +10002047 xfs_lock_two_inodes(ip, tip, XFS_ILOCK_EXCL);
2048 lock_flags |= XFS_ILOCK_EXCL;
Darrick J. Wong39aff5f2016-10-03 09:11:53 -07002049 xfs_trans_ijoin(tp, ip, 0);
2050 xfs_trans_ijoin(tp, tip, 0);
Dave Chinner723cac42015-02-23 21:47:29 +11002051
Dave Chinnera133d952013-08-12 20:49:48 +10002052
2053 /* Verify all data are being swapped */
2054 if (sxp->sx_offset != 0 ||
2055 sxp->sx_length != ip->i_d.di_size ||
2056 sxp->sx_length != tip->i_d.di_size) {
Dave Chinner24513372014-06-25 14:58:08 +10002057 error = -EFAULT;
Dave Chinner4ef897a2014-08-04 13:44:08 +10002058 goto out_trans_cancel;
Dave Chinnera133d952013-08-12 20:49:48 +10002059 }
2060
2061 trace_xfs_swap_extent_before(ip, 0);
2062 trace_xfs_swap_extent_before(tip, 1);
2063
2064 /* check inode formats now that data is flushed */
2065 error = xfs_swap_extents_check_format(ip, tip);
2066 if (error) {
2067 xfs_notice(mp,
2068 "%s: inode 0x%llx format is incompatible for exchanging.",
2069 __func__, ip->i_ino);
Dave Chinner4ef897a2014-08-04 13:44:08 +10002070 goto out_trans_cancel;
Dave Chinnera133d952013-08-12 20:49:48 +10002071 }
2072
2073 /*
2074 * Compare the current change & modify times with those
2075 * passed in. If they differ, we abort this swap.
2076 * This is the mechanism used to assure the calling
2077 * process that the file was not changed out from
2078 * under it.
2079 */
2080 if ((sbp->bs_ctime.tv_sec != VFS_I(ip)->i_ctime.tv_sec) ||
2081 (sbp->bs_ctime.tv_nsec != VFS_I(ip)->i_ctime.tv_nsec) ||
2082 (sbp->bs_mtime.tv_sec != VFS_I(ip)->i_mtime.tv_sec) ||
2083 (sbp->bs_mtime.tv_nsec != VFS_I(ip)->i_mtime.tv_nsec)) {
Dave Chinner24513372014-06-25 14:58:08 +10002084 error = -EBUSY;
Dave Chinner81217682014-08-04 13:29:32 +10002085 goto out_trans_cancel;
Dave Chinnera133d952013-08-12 20:49:48 +10002086 }
Dave Chinnera133d952013-08-12 20:49:48 +10002087
Dave Chinner21b5c972013-08-30 10:23:44 +10002088 /*
Dave Chinner21b5c972013-08-30 10:23:44 +10002089 * Note the trickiness in setting the log flags - we set the owner log
2090 * flag on the opposite inode (i.e. the inode we are setting the new
2091 * owner to be) because once we swap the forks and log that, log
2092 * recovery is going to see the fork as owned by the swapped inode,
2093 * not the pre-swapped inodes.
2094 */
2095 src_log_flags = XFS_ILOG_CORE;
2096 target_log_flags = XFS_ILOG_CORE;
Dave Chinner21b5c972013-08-30 10:23:44 +10002097
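	/* Use the rmap-based remap if available, otherwise swap the data forks. */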
Darrick J. Wong1f08af52016-10-03 09:11:53 -07002098 if (xfs_sb_version_hasrmapbt(&mp->m_sb))
2099 error = xfs_swap_extent_rmap(&tp, ip, tip);
2100 else
2101 error = xfs_swap_extent_forks(tp, ip, tip, &src_log_flags,
2102 &target_log_flags);
Darrick J. Wong39aff5f2016-10-03 09:11:53 -07002103 if (error)
2104 goto out_trans_cancel;
Dave Chinnera133d952013-08-12 20:49:48 +10002105
Darrick J. Wongf0bc4d12016-10-03 09:11:42 -07002106 /* Do we have to swap reflink flags? */
2107 if ((ip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK) ^
2108 (tip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK)) {
2109 f = ip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK;
2110 ip->i_d.di_flags2 &= ~XFS_DIFLAG2_REFLINK;
2111 ip->i_d.di_flags2 |= tip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK;
2112 tip->i_d.di_flags2 &= ~XFS_DIFLAG2_REFLINK;
2113 tip->i_d.di_flags2 |= f & XFS_DIFLAG2_REFLINK;
2114 cowfp = ip->i_cowfp;
2115 ip->i_cowfp = tip->i_cowfp;
2116 tip->i_cowfp = cowfp;
Darrick J. Wong83104d42016-10-03 09:11:46 -07002117 xfs_inode_set_cowblocks_tag(ip);
2118 xfs_inode_set_cowblocks_tag(tip);
Darrick J. Wongf0bc4d12016-10-03 09:11:42 -07002119 }
2120
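	/* Log the core and swapped fork state of both inodes. */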
Dave Chinnera133d952013-08-12 20:49:48 +10002121 xfs_trans_log_inode(tp, ip, src_log_flags);
2122 xfs_trans_log_inode(tp, tip, target_log_flags);
2123
2124 /*
Brian Foster6fb10d62017-08-29 10:08:39 -07002125 * The extent forks have been swapped, but crc=1,rmapbt=0 filesystems
2126 * have inode number owner values in the bmbt blocks that still refer to
2127 * the old inode. Scan each bmbt to fix up the owner values with the
2128 * inode number of the current inode.
2129 */
2130 if (src_log_flags & XFS_ILOG_DOWNER) {
Brian Foster2dd3d702017-08-29 10:08:40 -07002131 error = xfs_swap_change_owner(&tp, ip, tip);
Brian Foster6fb10d62017-08-29 10:08:39 -07002132 if (error)
2133 goto out_trans_cancel;
2134 }
2135 if (target_log_flags & XFS_ILOG_DOWNER) {
Brian Foster2dd3d702017-08-29 10:08:40 -07002136 error = xfs_swap_change_owner(&tp, tip, ip);
Brian Foster6fb10d62017-08-29 10:08:39 -07002137 if (error)
2138 goto out_trans_cancel;
2139 }
2140
2141 /*
Dave Chinnera133d952013-08-12 20:49:48 +10002142 * If this is a synchronous mount, make sure that the
2143 * transaction goes to disk before returning to the user.
2144 */
2145 if (mp->m_flags & XFS_MOUNT_WSYNC)
2146 xfs_trans_set_sync(tp);
2147
Christoph Hellwig70393312015-06-04 13:48:08 +10002148 error = xfs_trans_commit(tp);
Dave Chinnera133d952013-08-12 20:49:48 +10002149
2150 trace_xfs_swap_extent_after(ip, 0);
2151 trace_xfs_swap_extent_after(tip, 1);
Darrick J. Wong39aff5f2016-10-03 09:11:53 -07002152
Christoph Hellwig65523212016-11-30 14:33:25 +11002153out_unlock:
Darrick J. Wong39aff5f2016-10-03 09:11:53 -07002154 xfs_iunlock(ip, lock_flags);
2155 xfs_iunlock(tip, lock_flags);
Christoph Hellwig65523212016-11-30 14:33:25 +11002156 unlock_two_nondirectories(VFS_I(ip), VFS_I(tip));
Dave Chinnera133d952013-08-12 20:49:48 +10002157 return error;
2158
Darrick J. Wong39aff5f2016-10-03 09:11:53 -07002159out_trans_cancel:
2160 xfs_trans_cancel(tp);
Christoph Hellwig65523212016-11-30 14:33:25 +11002161 goto out_unlock;
Dave Chinnera133d952013-08-12 20:49:48 +10002162}