blob: 0fd051064ff0b95a4549a8235d5d0bb8046e8772 [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
Olaf Weber3e57ecf2006-06-09 14:48:12 +10002 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
Nathan Scott7b718762005-11-02 14:58:39 +11003 * All Rights Reserved.
Linus Torvalds1da177e2005-04-16 15:20:36 -07004 *
Nathan Scott7b718762005-11-02 14:58:39 +11005 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
Linus Torvalds1da177e2005-04-16 15:20:36 -07007 * published by the Free Software Foundation.
8 *
Nathan Scott7b718762005-11-02 14:58:39 +11009 * This program is distributed in the hope that it would be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
Linus Torvalds1da177e2005-04-16 15:20:36 -070013 *
Nathan Scott7b718762005-11-02 14:58:39 +110014 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
Linus Torvalds1da177e2005-04-16 15:20:36 -070017 */
Linus Torvalds1da177e2005-04-16 15:20:36 -070018#include "xfs.h"
Nathan Scotta844f452005-11-02 14:38:42 +110019#include "xfs_fs.h"
Dave Chinner70a98832013-10-23 10:36:05 +110020#include "xfs_shared.h"
Dave Chinner239880e2013-10-23 10:50:10 +110021#include "xfs_format.h"
22#include "xfs_log_format.h"
23#include "xfs_trans_resv.h"
Nathan Scotta844f452005-11-02 14:38:42 +110024#include "xfs_bit.h"
Linus Torvalds1da177e2005-04-16 15:20:36 -070025#include "xfs_sb.h"
Dave Chinnerf5ea1102013-04-24 18:58:02 +100026#include "xfs_mount.h"
Darrick J. Wong3ab78df2016-08-03 11:15:38 +100027#include "xfs_defer.h"
Dave Chinner57062782013-10-15 09:17:51 +110028#include "xfs_da_format.h"
Nathan Scotta844f452005-11-02 14:38:42 +110029#include "xfs_da_btree.h"
Dave Chinner2b9ab5a2013-08-12 20:49:37 +100030#include "xfs_dir2.h"
Linus Torvalds1da177e2005-04-16 15:20:36 -070031#include "xfs_inode.h"
Nathan Scotta844f452005-11-02 14:38:42 +110032#include "xfs_btree.h"
Dave Chinner239880e2013-10-23 10:50:10 +110033#include "xfs_trans.h"
Nathan Scotta844f452005-11-02 14:38:42 +110034#include "xfs_inode_item.h"
Linus Torvalds1da177e2005-04-16 15:20:36 -070035#include "xfs_extfree_item.h"
36#include "xfs_alloc.h"
37#include "xfs_bmap.h"
Dave Chinner68988112013-08-12 20:49:42 +100038#include "xfs_bmap_util.h"
Dave Chinnera4fbe6a2013-10-23 10:51:50 +110039#include "xfs_bmap_btree.h"
Linus Torvalds1da177e2005-04-16 15:20:36 -070040#include "xfs_rtalloc.h"
Darrick J. Wonge9e899a2017-10-31 12:04:49 -070041#include "xfs_errortag.h"
Linus Torvalds1da177e2005-04-16 15:20:36 -070042#include "xfs_error.h"
Linus Torvalds1da177e2005-04-16 15:20:36 -070043#include "xfs_quota.h"
44#include "xfs_trans_space.h"
45#include "xfs_buf_item.h"
Christoph Hellwig0b1b2132009-12-14 23:14:59 +000046#include "xfs_trace.h"
Dave Chinner19de7352013-04-03 16:11:18 +110047#include "xfs_symlink.h"
Dave Chinnera4fbe6a2013-10-23 10:51:50 +110048#include "xfs_attr_leaf.h"
Dave Chinnera4fbe6a2013-10-23 10:51:50 +110049#include "xfs_filestream.h"
Darrick J. Wong340785c2016-08-03 11:33:42 +100050#include "xfs_rmap.h"
Darrick J. Wong3fd129b2016-09-19 10:30:52 +100051#include "xfs_ag_resv.h"
Darrick J. Wong62aab202016-10-03 09:11:23 -070052#include "xfs_refcount.h"
Brian Foster974ae922016-11-28 14:57:42 +110053#include "xfs_icache.h"
Linus Torvalds1da177e2005-04-16 15:20:36 -070054
55
Linus Torvalds1da177e2005-04-16 15:20:36 -070056kmem_zone_t *xfs_bmap_free_item_zone;
57
58/*
Dave Chinner9e5987a72013-02-25 12:31:26 +110059 * Miscellaneous helper functions
Linus Torvalds1da177e2005-04-16 15:20:36 -070060 */
61
/*
 * Compute and fill in the value of the maximum depth of a bmap btree
 * in this filesystem.  Done once, during mount.
 *
 * The result is stored in mp->m_bm_maxlevels[whichfork].
 */
void
xfs_bmap_compute_maxlevels(
	xfs_mount_t	*mp,		/* file system mount structure */
	int		whichfork)	/* data or attr fork */
{
	int		level;		/* btree level */
	uint		maxblocks;	/* max blocks at this level */
	uint		maxleafents;	/* max leaf entries possible */
	int		maxrootrecs;	/* max records in root block */
	int		minleafrecs;	/* min records in leaf block */
	int		minnoderecs;	/* min records in node block */
	int		sz;		/* root block size */

	/*
	 * The maximum number of extents in a file, hence the maximum
	 * number of leaf entries, is controlled by the type of di_nextents
	 * (a signed 32-bit number, xfs_extnum_t), or by di_anextents
	 * (a signed 16-bit number, xfs_aextnum_t).
	 *
	 * Note that we can no longer assume that if we are in ATTR1 that
	 * the fork offset of all the inodes will be
	 * (xfs_default_attroffset(ip) >> 3) because we could have mounted
	 * with ATTR2 and then mounted back with ATTR1, keeping the
	 * di_forkoff's fixed but probably at various positions. Therefore,
	 * for both ATTR1 and ATTR2 we have to assume the worst case scenario
	 * of a minimum size available.
	 */
	if (whichfork == XFS_DATA_FORK) {
		maxleafents = MAXEXTNUM;
		sz = XFS_BMDR_SPACE_CALC(MINDBTPTRS);
	} else {
		maxleafents = MAXAEXTNUM;
		sz = XFS_BMDR_SPACE_CALC(MINABTPTRS);
	}
	maxrootrecs = xfs_bmdr_maxrecs(sz, 0);
	minleafrecs = mp->m_bmap_dmnr[0];
	minnoderecs = mp->m_bmap_dmnr[1];
	/*
	 * Work upward from the worst-case number of leaf blocks, dividing
	 * by the minimum node fanout at each level, until everything fits
	 * in a single (root) block.
	 */
	maxblocks = (maxleafents + minleafrecs - 1) / minleafrecs;
	for (level = 1; maxblocks > 1; level++) {
		if (maxblocks <= maxrootrecs)
			maxblocks = 1;
		else
			maxblocks = (maxblocks + minnoderecs - 1) / minnoderecs;
	}
	mp->m_bm_maxlevels[whichfork] = level;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700112
/*
 * Look up the extent record exactly matching *irec in the bmap btree.
 * On return *stat is 1 if the record was found, 0 if not; the cursor is
 * positioned for a subsequent update/insert/delete.
 */
STATIC int				/* error */
xfs_bmbt_lookup_eq(
	struct xfs_btree_cur	*cur,
	struct xfs_bmbt_irec	*irec,
	int			*stat)	/* success/failure */
{
	cur->bc_rec.b = *irec;
	return xfs_btree_lookup(cur, XFS_LOOKUP_EQ, stat);
}
122
/*
 * Position the cursor at the first extent record in the bmap btree by
 * doing a >= lookup against an all-zeroes key.  On return *stat is 1 if
 * the tree contains at least one record.
 */
STATIC int				/* error */
xfs_bmbt_lookup_first(
	struct xfs_btree_cur	*cur,
	int			*stat)	/* success/failure */
{
	/* Only the key fields are zeroed; br_state is not part of the key. */
	cur->bc_rec.b.br_startoff = 0;
	cur->bc_rec.b.br_startblock = 0;
	cur->bc_rec.b.br_blockcount = 0;
	return xfs_btree_lookup(cur, XFS_LOOKUP_GE, stat);
}
133
Christoph Hellwig278d0ca2008-10-30 16:56:32 +1100134/*
Christoph Hellwig8096b1e2011-12-18 20:00:07 +0000135 * Check if the inode needs to be converted to btree format.
136 */
137static inline bool xfs_bmap_needs_btree(struct xfs_inode *ip, int whichfork)
138{
Darrick J. Wong60b49842016-10-03 09:11:34 -0700139 return whichfork != XFS_COW_FORK &&
140 XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS &&
Christoph Hellwig8096b1e2011-12-18 20:00:07 +0000141 XFS_IFORK_NEXTENTS(ip, whichfork) >
142 XFS_IFORK_MAXEXT(ip, whichfork);
143}
144
145/*
146 * Check if the inode should be converted to extent format.
147 */
148static inline bool xfs_bmap_wants_extents(struct xfs_inode *ip, int whichfork)
149{
Darrick J. Wong60b49842016-10-03 09:11:34 -0700150 return whichfork != XFS_COW_FORK &&
151 XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE &&
Christoph Hellwig8096b1e2011-12-18 20:00:07 +0000152 XFS_IFORK_NEXTENTS(ip, whichfork) <=
153 XFS_IFORK_MAXEXT(ip, whichfork);
154}
155
156/*
Christoph Hellwiga67d00a2017-10-17 14:16:26 -0700157 * Update the record referred to by cur to the value given by irec
Christoph Hellwig278d0ca2008-10-30 16:56:32 +1100158 * This either works (return 0) or gets an EFSCORRUPTED error.
159 */
160STATIC int
161xfs_bmbt_update(
162 struct xfs_btree_cur *cur,
Christoph Hellwiga67d00a2017-10-17 14:16:26 -0700163 struct xfs_bmbt_irec *irec)
Christoph Hellwig278d0ca2008-10-30 16:56:32 +1100164{
165 union xfs_btree_rec rec;
166
Christoph Hellwiga67d00a2017-10-17 14:16:26 -0700167 xfs_bmbt_disk_set_all(&rec.bmbt, irec);
Christoph Hellwig278d0ca2008-10-30 16:56:32 +1100168 return xfs_btree_update(cur, &rec);
169}
Christoph Hellwigfe033cc2008-10-30 16:56:09 +1100170
/*
 * Compute the worst-case number of indirect blocks that will be used
 * for ip's delayed extent of length "len".
 *
 * This is the reservation charged for bmbt blocks when a delalloc
 * extent is created; it sums one level's worth of blocks at a time.
 */
STATIC xfs_filblks_t
xfs_bmap_worst_indlen(
	xfs_inode_t	*ip,		/* incore inode pointer */
	xfs_filblks_t	len)		/* delayed extent length */
{
	int		level;		/* btree level number */
	int		maxrecs;	/* maximum record count at this level */
	xfs_mount_t	*mp;		/* mount structure */
	xfs_filblks_t	rval;		/* return value */

	mp = ip->i_mount;
	maxrecs = mp->m_bmap_dmxr[0];
	for (level = 0, rval = 0;
	     level < XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK);
	     level++) {
		/*
		 * Round len up to the number of btree blocks needed at this
		 * level; do_div() divides len in place.
		 */
		len += maxrecs - 1;
		do_div(len, maxrecs);
		rval += len;
		/*
		 * Once a level needs only one block, every remaining level
		 * needs exactly one block too; account for them all at once.
		 */
		if (len == 1)
			return rval + XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK) -
				level - 1;
		/* Above the leaves, use the node record limit. */
		if (level == 0)
			maxrecs = mp->m_bmap_dmxr[1];
	}
	return rval;
}
201
202/*
203 * Calculate the default attribute fork offset for newly created inodes.
204 */
205uint
206xfs_default_attroffset(
207 struct xfs_inode *ip)
208{
209 struct xfs_mount *mp = ip->i_mount;
210 uint offset;
211
212 if (mp->m_sb.sb_inodesize == 256) {
Christoph Hellwig56cea2d2013-03-12 23:30:36 +1100213 offset = XFS_LITINO(mp, ip->i_d.di_version) -
Dave Chinner9e5987a72013-02-25 12:31:26 +1100214 XFS_BMDR_SPACE_CALC(MINABTPTRS);
215 } else {
216 offset = XFS_BMDR_SPACE_CALC(6 * MINABTPTRS);
217 }
218
Christoph Hellwig56cea2d2013-03-12 23:30:36 +1100219 ASSERT(offset < XFS_LITINO(mp, ip->i_d.di_version));
Dave Chinner9e5987a72013-02-25 12:31:26 +1100220 return offset;
221}
222
223/*
224 * Helper routine to reset inode di_forkoff field when switching
225 * attribute fork from local to extent format - we reset it where
226 * possible to make space available for inline data fork extents.
227 */
228STATIC void
229xfs_bmap_forkoff_reset(
Dave Chinner9e5987a72013-02-25 12:31:26 +1100230 xfs_inode_t *ip,
231 int whichfork)
232{
233 if (whichfork == XFS_ATTR_FORK &&
234 ip->i_d.di_format != XFS_DINODE_FMT_DEV &&
Dave Chinner9e5987a72013-02-25 12:31:26 +1100235 ip->i_d.di_format != XFS_DINODE_FMT_BTREE) {
236 uint dfl_forkoff = xfs_default_attroffset(ip) >> 3;
237
238 if (dfl_forkoff > ip->i_d.di_forkoff)
239 ip->i_d.di_forkoff = dfl_forkoff;
240 }
241}
242
Dave Chinner9e5987a72013-02-25 12:31:26 +1100243#ifdef DEBUG
/*
 * Find an in-memory buffer for the given block, first among the buffers
 * held by the btree cursor, then among the buffers attached to the
 * cursor's transaction.  Returns NULL if no buffer is found.
 *
 * NOTE(review): bno is declared xfs_fsblock_t but is compared against
 * XFS_BUF_ADDR(); callers in this file pass XFS_FSB_TO_DADDR(mp, bno),
 * i.e. a disk address — confirm before reusing elsewhere.
 */
STATIC struct xfs_buf *
xfs_bmap_get_bp(
	struct xfs_btree_cur	*cur,
	xfs_fsblock_t		bno)
{
	struct xfs_log_item	*lip;
	int			i;

	if (!cur)
		return NULL;

	/* Scan the cursor's buffer stack; it is packed from index 0 up. */
	for (i = 0; i < XFS_BTREE_MAXLEVELS; i++) {
		if (!cur->bc_bufs[i])
			break;
		if (XFS_BUF_ADDR(cur->bc_bufs[i]) == bno)
			return cur->bc_bufs[i];
	}

	/* Chase down all the log items to see if the bp is there */
	list_for_each_entry(lip, &cur->bc_tp->t_items, li_trans) {
		struct xfs_buf_log_item	*bip = (struct xfs_buf_log_item *)lip;

		/* Only buffer log items carry a buffer pointer. */
		if (bip->bli_item.li_type == XFS_LI_BUF &&
		    XFS_BUF_ADDR(bip->bli_buf) == bno)
			return bip->bli_buf;
	}

	return NULL;
}
273
/*
 * Sanity-check one interior (non-leaf) bmbt block: keys must be in
 * strictly increasing startoff order and no two child pointers may be
 * equal.  A duplicate pointer indicates in-core corruption and shuts
 * the filesystem down.
 *
 * @root: nonzero if this is the incore root block (different ptr layout)
 * @sz:   root block size in bytes, used only when root != 0
 */
STATIC void
xfs_check_block(
	struct xfs_btree_block	*block,
	xfs_mount_t		*mp,
	int			root,
	short			sz)
{
	int			i, j, dmxr;
	__be64			*pp, *thispa;	/* pointer to block address */
	xfs_bmbt_key_t		*prevp, *keyp;

	/* Interior blocks only; leaves have level 0. */
	ASSERT(be16_to_cpu(block->bb_level) > 0);

	prevp = NULL;
	for( i = 1; i <= xfs_btree_get_numrecs(block); i++) {
		dmxr = mp->m_bmap_dmxr[0];
		keyp = XFS_BMBT_KEY_ADDR(mp, block, i);

		/* Keys must be strictly ascending. */
		if (prevp) {
			ASSERT(be64_to_cpu(prevp->br_startoff) <
			       be64_to_cpu(keyp->br_startoff));
		}
		prevp = keyp;

		/*
		 * Compare the block numbers to see if there are dups.
		 */
		if (root)
			pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, i, sz);
		else
			pp = XFS_BMBT_PTR_ADDR(mp, block, i, dmxr);

		for (j = i+1; j <= be16_to_cpu(block->bb_numrecs); j++) {
			if (root)
				thispa = XFS_BMAP_BROOT_PTR_ADDR(mp, block, j, sz);
			else
				thispa = XFS_BMBT_PTR_ADDR(mp, block, j, dmxr);
			if (*thispa == *pp) {
				xfs_warn(mp, "%s: thispa(%d) == pp(%d) %Ld",
					__func__, j, i,
					(unsigned long long)be64_to_cpu(*thispa));
				xfs_err(mp, "%s: ptrs are equal in node\n",
					__func__);
				xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
			}
		}
	}
}
322
/*
 * Check that the extents for the inode ip are in the right order in all
 * btree leaves. This becomes prohibitively expensive for large extent count
 * files, so don't bother with inodes that have more than 10,000 extents in
 * them. The btree record ordering checks will still be done, so for such large
 * bmapbt constructs that is going to catch most corruptions.
 */
STATIC void
xfs_bmap_check_leaf_extents(
	xfs_btree_cur_t		*cur,	/* btree cursor or null */
	xfs_inode_t		*ip,	/* incore inode pointer */
	int			whichfork) /* data or attr fork */
{
	struct xfs_btree_block	*block;	/* current btree block */
	xfs_fsblock_t		bno;	/* block # of "block" */
	xfs_buf_t		*bp;	/* buffer for "block" */
	int			error;	/* error return value */
	xfs_extnum_t		i=0, j;	/* index into the extents list */
	xfs_ifork_t		*ifp;	/* fork structure */
	int			level;	/* btree level, for checking */
	xfs_mount_t		*mp;	/* file system mount structure */
	__be64			*pp;	/* pointer to block address */
	xfs_bmbt_rec_t		*ep;	/* pointer to current extent */
	xfs_bmbt_rec_t		last = {0, 0}; /* last extent in prev block */
	xfs_bmbt_rec_t		*nextp;	/* pointer to next extent */
	int			bp_release = 0; /* do we own bp's reference? */

	/* Only btree-format forks have leaves to check. */
	if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE) {
		return;
	}

	/* skip large extent count inodes */
	if (ip->i_d.di_nextents > 10000)
		return;

	bno = NULLFSBLOCK;
	mp = ip->i_mount;
	ifp = XFS_IFORK_PTR(ip, whichfork);
	block = ifp->if_broot;
	/*
	 * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
	 */
	level = be16_to_cpu(block->bb_level);
	ASSERT(level > 0);
	xfs_check_block(block, mp, 1, ifp->if_broot_bytes);
	pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
	bno = be64_to_cpu(*pp);

	ASSERT(bno != NULLFSBLOCK);
	ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount);
	ASSERT(XFS_FSB_TO_AGBNO(mp, bno) < mp->m_sb.sb_agblocks);

	/*
	 * Go down the tree until leaf level is reached, following the first
	 * pointer (leftmost) at each level.
	 */
	while (level-- > 0) {
		/* See if buf is in cur first */
		bp_release = 0;
		bp = xfs_bmap_get_bp(cur, XFS_FSB_TO_DADDR(mp, bno));
		if (!bp) {
			/* Not held by the cursor: read it, release it later. */
			bp_release = 1;
			error = xfs_btree_read_bufl(mp, NULL, bno, 0, &bp,
						XFS_BMAP_BTREE_REF,
						&xfs_bmbt_buf_ops);
			if (error)
				goto error_norelse;
		}
		block = XFS_BUF_TO_BLOCK(bp);
		if (level == 0)
			break;

		/*
		 * Check this block for basic sanity (increasing keys and
		 * no duplicate blocks).
		 */

		xfs_check_block(block, mp, 0, 0);
		pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
		bno = be64_to_cpu(*pp);
		XFS_WANT_CORRUPTED_GOTO(mp,
					xfs_verify_fsbno(mp, bno), error0);
		if (bp_release) {
			bp_release = 0;
			xfs_trans_brelse(NULL, bp);
		}
	}

	/*
	 * Here with bp and block set to the leftmost leaf node in the tree.
	 */
	i = 0;

	/*
	 * Loop over all leaf nodes checking that all extents are in the right order.
	 */
	for (;;) {
		xfs_fsblock_t	nextbno;
		xfs_extnum_t	num_recs;


		num_recs = xfs_btree_get_numrecs(block);

		/*
		 * Read-ahead the next leaf block, if any.
		 */

		nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);

		/*
		 * Check all the extents to make sure they are OK.
		 * If we had a previous block, the last entry should
		 * conform with the first entry in this one.
		 */

		ep = XFS_BMBT_REC_ADDR(mp, block, 1);
		if (i) {
			ASSERT(xfs_bmbt_disk_get_startoff(&last) +
			       xfs_bmbt_disk_get_blockcount(&last) <=
			       xfs_bmbt_disk_get_startoff(ep));
		}
		for (j = 1; j < num_recs; j++) {
			/* Extents must not overlap their successor. */
			nextp = XFS_BMBT_REC_ADDR(mp, block, j + 1);
			ASSERT(xfs_bmbt_disk_get_startoff(ep) +
			       xfs_bmbt_disk_get_blockcount(ep) <=
			       xfs_bmbt_disk_get_startoff(nextp));
			ep = nextp;
		}

		last = *ep;
		i += num_recs;
		if (bp_release) {
			bp_release = 0;
			xfs_trans_brelse(NULL, bp);
		}
		bno = nextbno;
		/*
		 * If we've reached the end, stop.
		 */
		if (bno == NULLFSBLOCK)
			break;

		bp_release = 0;
		bp = xfs_bmap_get_bp(cur, XFS_FSB_TO_DADDR(mp, bno));
		if (!bp) {
			bp_release = 1;
			error = xfs_btree_read_bufl(mp, NULL, bno, 0, &bp,
						XFS_BMAP_BTREE_REF,
						&xfs_bmbt_buf_ops);
			if (error)
				goto error_norelse;
		}
		block = XFS_BUF_TO_BLOCK(bp);
	}

	return;

error0:
	xfs_warn(mp, "%s: at error0", __func__);
	if (bp_release)
		xfs_trans_brelse(NULL, bp);
error_norelse:
	xfs_warn(mp, "%s: BAD after btree leaves for %d extents",
		__func__, i);
	xfs_err(mp, "%s: CORRUPTED BTREE OR SOMETHING", __func__);
	xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
	return;
}
491
/*
 * Validate that the bmbt_irecs being returned from bmapi are valid
 * given the caller's original parameters.  Specifically check the
 * ranges of the returned irecs to ensure that they only extend beyond
 * the given parameters if the XFS_BMAPI_ENTIRE flag was set.
 */
STATIC void
xfs_bmap_validate_ret(
	xfs_fileoff_t		bno,
	xfs_filblks_t		len,
	int			flags,
	xfs_bmbt_irec_t		*mval,
	int			nmap,
	int			ret_nmap)
{
	int			i;		/* index to map values */

	ASSERT(ret_nmap <= nmap);

	for (i = 0; i < ret_nmap; i++) {
		ASSERT(mval[i].br_blockcount > 0);
		if (!(flags & XFS_BMAPI_ENTIRE)) {
			/* Mappings must lie entirely inside [bno, bno+len). */
			ASSERT(mval[i].br_startoff >= bno);
			ASSERT(mval[i].br_blockcount <= len);
			ASSERT(mval[i].br_startoff + mval[i].br_blockcount <=
			       bno + len);
		} else {
			/* ENTIRE mappings need only overlap the range. */
			ASSERT(mval[i].br_startoff < bno + len);
			ASSERT(mval[i].br_startoff + mval[i].br_blockcount >
			       bno);
		}
		/* Successive mappings must be contiguous in file offset. */
		ASSERT(i == 0 ||
		       mval[i - 1].br_startoff + mval[i - 1].br_blockcount ==
		       mval[i].br_startoff);
		/* No delalloc or hole sentinels should be returned here. */
		ASSERT(mval[i].br_startblock != DELAYSTARTBLOCK &&
		       mval[i].br_startblock != HOLESTARTBLOCK);
		ASSERT(mval[i].br_state == XFS_EXT_NORM ||
		       mval[i].br_state == XFS_EXT_UNWRITTEN);
	}
}
532
533#else
534#define xfs_bmap_check_leaf_extents(cur, ip, whichfork) do { } while (0)
Darrick J. Wong7bf7a192017-08-31 15:11:06 -0700535#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do { } while (0)
Dave Chinner9e5987a72013-02-25 12:31:26 +1100536#endif /* DEBUG */
537
538/*
539 * bmap free list manipulation functions
540 */
541
/*
 * Add the extent to the list of extents to be free at transaction end.
 * The list is maintained sorted (by block number).
 *
 * The allocated xfs_extent_free_item is handed off to the deferred-ops
 * machinery, which owns it from here on.  If @oinfo is NULL the extent
 * is queued with rmap owner updates skipped.
 */
void
__xfs_bmap_add_free(
	struct xfs_mount		*mp,
	struct xfs_defer_ops		*dfops,
	xfs_fsblock_t			bno,
	xfs_filblks_t			len,
	struct xfs_owner_info		*oinfo,
	bool				skip_discard)
{
	struct xfs_extent_free_item	*new;		/* new element */
#ifdef DEBUG
	xfs_agnumber_t		agno;
	xfs_agblock_t		agbno;

	/* The extent must be a real, in-bounds allocation within one AG. */
	ASSERT(bno != NULLFSBLOCK);
	ASSERT(len > 0);
	ASSERT(len <= MAXEXTLEN);
	ASSERT(!isnullstartblock(bno));
	agno = XFS_FSB_TO_AGNO(mp, bno);
	agbno = XFS_FSB_TO_AGBNO(mp, bno);
	ASSERT(agno < mp->m_sb.sb_agcount);
	ASSERT(agbno < mp->m_sb.sb_agblocks);
	ASSERT(len < mp->m_sb.sb_agblocks);
	ASSERT(agbno + len <= mp->m_sb.sb_agblocks);
#endif
	ASSERT(xfs_bmap_free_item_zone != NULL);

	new = kmem_zone_alloc(xfs_bmap_free_item_zone, KM_SLEEP);
	new->xefi_startblock = bno;
	new->xefi_blockcount = (xfs_extlen_t)len;
	if (oinfo)
		new->xefi_oinfo = *oinfo;
	else
		new->xefi_oinfo = *oinfo;
	new->xefi_skip_discard = skip_discard;
	trace_xfs_bmap_free_defer(mp, XFS_FSB_TO_AGNO(mp, bno), 0,
			XFS_FSB_TO_AGBNO(mp, bno), len);
	xfs_defer_add(dfops, XFS_DEFER_OPS_TYPE_FREE, &new->xefi_list);
}
585
586/*
587 * Inode fork format manipulation functions
588 */
589
/*
 * Transform a btree format file with only one leaf node, where the
 * extents list will fit in the inode, into an extents format file.
 * Since the file extents are already in-core, all we have to do is
 * give up the space for the btree root and pitch the leaf block.
 */
STATIC int				/* error */
xfs_bmap_btree_to_extents(
	xfs_trans_t		*tp,	/* transaction pointer */
	xfs_inode_t		*ip,	/* incore inode pointer */
	xfs_btree_cur_t		*cur,	/* btree cursor */
	int			*logflagsp, /* inode logging flags */
	int			whichfork)  /* data or attr fork */
{
	/* REFERENCED */
	struct xfs_btree_block	*cblock;/* child btree block */
	xfs_fsblock_t		cbno;	/* child block number */
	xfs_buf_t		*cbp;	/* child block's buffer */
	int			error;	/* error return value */
	xfs_ifork_t		*ifp;	/* inode fork data */
	xfs_mount_t		*mp;	/* mount point structure */
	__be64			*pp;	/* ptr to block address */
	struct xfs_btree_block	*rblock;/* root btree block */
	struct xfs_owner_info	oinfo;

	mp = ip->i_mount;
	ifp = XFS_IFORK_PTR(ip, whichfork);
	ASSERT(whichfork != XFS_COW_FORK);
	ASSERT(ifp->if_flags & XFS_IFEXTENTS);
	ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE);
	rblock = ifp->if_broot;
	/* Exactly one level below the root, with a single child. */
	ASSERT(be16_to_cpu(rblock->bb_level) == 1);
	ASSERT(be16_to_cpu(rblock->bb_numrecs) == 1);
	ASSERT(xfs_bmbt_maxrecs(mp, ifp->if_broot_bytes, 0) == 1);
	pp = XFS_BMAP_BROOT_PTR_ADDR(mp, rblock, 1, ifp->if_broot_bytes);
	cbno = be64_to_cpu(*pp);
	*logflagsp = 0;
#ifdef DEBUG
	XFS_WANT_CORRUPTED_RETURN(cur->bc_mp,
			xfs_btree_check_lptr(cur, cbno, 1));
#endif
	error = xfs_btree_read_bufl(mp, tp, cbno, 0, &cbp, XFS_BMAP_BTREE_REF,
				&xfs_bmbt_buf_ops);
	if (error)
		return error;
	cblock = XFS_BUF_TO_BLOCK(cbp);
	if ((error = xfs_btree_check_block(cur, cblock, 0, cbp)))
		return error;
	/* Queue the now-unused leaf block for freeing at commit time. */
	xfs_rmap_ino_bmbt_owner(&oinfo, ip->i_ino, whichfork);
	xfs_bmap_add_free(mp, cur->bc_private.b.dfops, cbno, 1, &oinfo);
	ip->i_d.di_nblocks--;
	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L);
	xfs_trans_binval(tp, cbp);
	/* The cursor must not keep a stale reference to the freed buffer. */
	if (cur->bc_bufs[0] == cbp)
		cur->bc_bufs[0] = NULL;
	xfs_iroot_realloc(ip, -1, whichfork);
	ASSERT(ifp->if_broot == NULL);
	ASSERT((ifp->if_flags & XFS_IFBROOT) == 0);
	XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS);
	*logflagsp = XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
	return 0;
}
652
/*
 * Convert an extents-format file into a btree-format file.
 * The new file will have a root block (in the inode) and a single child block.
 */
STATIC int				/* error */
xfs_bmap_extents_to_btree(
	xfs_trans_t		*tp,		/* transaction pointer */
	xfs_inode_t		*ip,		/* incore inode pointer */
	xfs_fsblock_t		*firstblock,	/* first-block-allocated */
	struct xfs_defer_ops	*dfops,		/* blocks freed in xaction */
	xfs_btree_cur_t		**curp,		/* cursor returned to caller */
	int			wasdel,		/* converting a delayed alloc */
	int			*logflagsp,	/* inode logging flags */
	int			whichfork)	/* data or attr fork */
{
	struct xfs_btree_block	*ablock;	/* allocated (child) bt block */
	xfs_buf_t		*abp;		/* buffer for ablock */
	xfs_alloc_arg_t		args;		/* allocation arguments */
	xfs_bmbt_rec_t		*arp;		/* child record pointer */
	struct xfs_btree_block	*block;		/* btree root block */
	xfs_btree_cur_t		*cur;		/* bmap btree cursor */
	int			error;		/* error return value */
	xfs_ifork_t		*ifp;		/* inode fork pointer */
	xfs_bmbt_key_t		*kp;		/* root block key pointer */
	xfs_mount_t		*mp;		/* mount structure */
	xfs_bmbt_ptr_t		*pp;		/* root block address pointer */
	struct xfs_iext_cursor	icur;
	struct xfs_bmbt_irec	rec;
	xfs_extnum_t		cnt = 0;

	mp = ip->i_mount;
	ASSERT(whichfork != XFS_COW_FORK);
	ifp = XFS_IFORK_PTR(ip, whichfork);
	ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS);

	/*
	 * Make space in the inode incore.
	 */
	xfs_iroot_realloc(ip, 1, whichfork);
	ifp->if_flags |= XFS_IFBROOT;

	/*
	 * Fill in the root.
	 */
	block = ifp->if_broot;
	xfs_btree_init_block_int(mp, block, XFS_BUF_DADDR_NULL,
				 XFS_BTNUM_BMAP, 1, 1, ip->i_ino,
				 XFS_BTREE_LONG_PTRS);
	/*
	 * Need a cursor.  Can't allocate until bb_level is filled in.
	 */
	cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
	cur->bc_private.b.firstblock = *firstblock;
	cur->bc_private.b.dfops = dfops;
	cur->bc_private.b.flags = wasdel ? XFS_BTCUR_BPRV_WASDEL : 0;
	/*
	 * Convert to a btree with two levels, one record in root.
	 */
	XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_BTREE);
	memset(&args, 0, sizeof(args));
	args.tp = tp;
	args.mp = mp;
	xfs_rmap_ino_bmbt_owner(&args.oinfo, ip->i_ino, whichfork);
	args.firstblock = *firstblock;
	if (*firstblock == NULLFSBLOCK) {
		/* Nothing allocated yet; aim near the inode. */
		args.type = XFS_ALLOCTYPE_START_BNO;
		args.fsbno = XFS_INO_TO_FSB(mp, ip->i_ino);
	} else if (dfops->dop_low) {
		/* Low-space mode: take what we can get from *firstblock on. */
		args.type = XFS_ALLOCTYPE_START_BNO;
		args.fsbno = *firstblock;
	} else {
		args.type = XFS_ALLOCTYPE_NEAR_BNO;
		args.fsbno = *firstblock;
	}
	args.minlen = args.maxlen = args.prod = 1;
	args.wasdel = wasdel;
	*logflagsp = 0;
	if ((error = xfs_alloc_vextent(&args))) {
		/* Undo the incore format change before bailing out. */
		xfs_iroot_realloc(ip, -1, whichfork);
		ASSERT(ifp->if_broot == NULL);
		XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS);
		xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
		return error;
	}

	if (WARN_ON_ONCE(args.fsbno == NULLFSBLOCK)) {
		/* Allocation "succeeded" but gave no block: treat as ENOSPC. */
		xfs_iroot_realloc(ip, -1, whichfork);
		ASSERT(ifp->if_broot == NULL);
		XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS);
		xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
		return -ENOSPC;
	}
	/*
	 * Allocation can't fail, the space was reserved.
	 */
	ASSERT(*firstblock == NULLFSBLOCK ||
	       args.agno >= XFS_FSB_TO_AGNO(mp, *firstblock));
	*firstblock = cur->bc_private.b.firstblock = args.fsbno;
	cur->bc_private.b.allocated++;
	ip->i_d.di_nblocks++;
	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, 1L);
	abp = xfs_btree_get_bufl(mp, tp, args.fsbno, 0);
	/*
	 * Fill in the child block.
	 */
	abp->b_ops = &xfs_bmbt_buf_ops;
	ablock = XFS_BUF_TO_BLOCK(abp);
	xfs_btree_init_block_int(mp, ablock, abp->b_bn,
				XFS_BTNUM_BMAP, 0, 0, ip->i_ino,
				XFS_BTREE_LONG_PTRS);

	/* Copy real extents only; delalloc extents have null startblocks. */
	for_each_xfs_iext(ifp, &icur, &rec) {
		if (isnullstartblock(rec.br_startblock))
			continue;
		arp = XFS_BMBT_REC_ADDR(mp, ablock, 1 + cnt);
		xfs_bmbt_disk_set_all(arp, &rec);
		cnt++;
	}
	ASSERT(cnt == XFS_IFORK_NEXTENTS(ip, whichfork));
	xfs_btree_set_numrecs(ablock, cnt);

	/*
	 * Fill in the root key and pointer.
	 */
	kp = XFS_BMBT_KEY_ADDR(mp, block, 1);
	arp = XFS_BMBT_REC_ADDR(mp, ablock, 1);
	kp->br_startoff = cpu_to_be64(xfs_bmbt_disk_get_startoff(arp));
	pp = XFS_BMBT_PTR_ADDR(mp, block, 1, xfs_bmbt_get_maxrecs(cur,
						be16_to_cpu(block->bb_level)));
	*pp = cpu_to_be64(args.fsbno);

	/*
	 * Do all this logging at the end so that
	 * the root is at the right level.
	 */
	xfs_btree_log_block(cur, abp, XFS_BB_ALL_BITS);
	xfs_btree_log_recs(cur, abp, 1, be16_to_cpu(ablock->bb_numrecs));
	ASSERT(*curp == NULL);
	*curp = cur;
	*logflagsp = XFS_ILOG_CORE | xfs_ilog_fbroot(whichfork);
	return 0;
}
795
796/*
797 * Convert a local file to an extents file.
798 * This code is out of bounds for data forks of regular files,
799 * since the file data needs to get logged so things will stay consistent.
800 * (The bmap-level manipulations are ok, though).
801 */
Dave Chinnerf3508bc2013-07-10 07:04:00 +1000802void
803xfs_bmap_local_to_extents_empty(
804 struct xfs_inode *ip,
805 int whichfork)
806{
807 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
808
Darrick J. Wong60b49842016-10-03 09:11:34 -0700809 ASSERT(whichfork != XFS_COW_FORK);
Dave Chinnerf3508bc2013-07-10 07:04:00 +1000810 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL);
811 ASSERT(ifp->if_bytes == 0);
812 ASSERT(XFS_IFORK_NEXTENTS(ip, whichfork) == 0);
813
Eric Sandeen6a9edd32014-04-14 18:59:26 +1000814 xfs_bmap_forkoff_reset(ip, whichfork);
Dave Chinnerf3508bc2013-07-10 07:04:00 +1000815 ifp->if_flags &= ~XFS_IFINLINE;
816 ifp->if_flags |= XFS_IFEXTENTS;
Christoph Hellwig6bdcf262017-11-03 10:34:46 -0700817 ifp->if_u1.if_root = NULL;
818 ifp->if_height = 0;
Dave Chinnerf3508bc2013-07-10 07:04:00 +1000819 XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS);
820}
821
822
/*
 * Convert an inline (local format) fork into a one-extent fork.  Allocates a
 * single filesystem block, lets @init_fn format/copy the inline data into it
 * (and log it), then rebuilds the incore fork as an extent list containing
 * exactly that one extent.  On success *logflagsp tells the caller what to
 * log; the inode itself is not logged here.
 */
STATIC int				/* error */
xfs_bmap_local_to_extents(
	xfs_trans_t	*tp,		/* transaction pointer */
	xfs_inode_t	*ip,		/* incore inode pointer */
	xfs_fsblock_t	*firstblock,	/* first block allocated in xaction */
	xfs_extlen_t	total,		/* total blocks needed by transaction */
	int		*logflagsp,	/* inode logging flags */
	int		whichfork,	/* data or attr fork */
	void		(*init_fn)(struct xfs_trans *tp,
				   struct xfs_buf *bp,
				   struct xfs_inode *ip,
				   struct xfs_ifork *ifp))
{
	int		error = 0;
	int		flags;		/* logging flags returned */
	xfs_ifork_t	*ifp;		/* inode fork pointer */
	xfs_alloc_arg_t	args;		/* allocation arguments */
	xfs_buf_t	*bp;		/* buffer for extent block */
	struct xfs_bmbt_irec rec;	/* the single new extent */
	struct xfs_iext_cursor icur;

	/*
	 * We don't want to deal with the case of keeping inode data inline yet.
	 * So sending the data fork of a regular inode is invalid.
	 */
	ASSERT(!(S_ISREG(VFS_I(ip)->i_mode) && whichfork == XFS_DATA_FORK));
	ifp = XFS_IFORK_PTR(ip, whichfork);
	ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL);

	/* An empty fork needs no block allocation at all. */
	if (!ifp->if_bytes) {
		xfs_bmap_local_to_extents_empty(ip, whichfork);
		flags = XFS_ILOG_CORE;
		goto done;
	}

	flags = 0;
	error = 0;
	ASSERT((ifp->if_flags & (XFS_IFINLINE|XFS_IFEXTENTS)) == XFS_IFINLINE);
	memset(&args, 0, sizeof(args));
	args.tp = tp;
	args.mp = ip->i_mount;
	xfs_rmap_ino_owner(&args.oinfo, ip->i_ino, whichfork, 0);
	args.firstblock = *firstblock;
	/*
	 * Allocate a block.  We know we need only one, since the
	 * file currently fits in an inode.  Target near the inode when no
	 * prior allocation constrains us, else near *firstblock.
	 */
	if (*firstblock == NULLFSBLOCK) {
		args.fsbno = XFS_INO_TO_FSB(args.mp, ip->i_ino);
		args.type = XFS_ALLOCTYPE_START_BNO;
	} else {
		args.fsbno = *firstblock;
		args.type = XFS_ALLOCTYPE_NEAR_BNO;
	}
	args.total = total;
	args.minlen = args.maxlen = args.prod = 1;
	error = xfs_alloc_vextent(&args);
	if (error)
		goto done;

	/* Can't fail, the space was reserved. */
	ASSERT(args.fsbno != NULLFSBLOCK);
	ASSERT(args.len == 1);
	*firstblock = args.fsbno;
	bp = xfs_btree_get_bufl(args.mp, tp, args.fsbno, 0);

	/*
	 * Initialize the block, copy the data and log the remote buffer.
	 *
	 * The callout is responsible for logging because the remote format
	 * might differ from the local format and thus we don't know how much to
	 * log here. Note that init_fn must also set the buffer log item type
	 * correctly.
	 */
	init_fn(tp, bp, ip, ifp);

	/* account for the change in fork size */
	xfs_idata_realloc(ip, -ifp->if_bytes, whichfork);
	xfs_bmap_local_to_extents_empty(ip, whichfork);
	flags |= XFS_ILOG_CORE;

	/* start from a pristine (empty) incore extent tree */
	ifp->if_u1.if_root = NULL;
	ifp->if_height = 0;

	/* insert the single real extent covering file offset 0 */
	rec.br_startoff = 0;
	rec.br_startblock = args.fsbno;
	rec.br_blockcount = 1;
	rec.br_state = XFS_EXT_NORM;
	xfs_iext_first(ifp, &icur);
	xfs_iext_insert(ip, &icur, &rec, 0);

	/* update incore counters and quota for the one new block */
	XFS_IFORK_NEXT_SET(ip, whichfork, 1);
	ip->i_d.di_nblocks = 1;
	xfs_trans_mod_dquot_byino(tp, ip,
		XFS_TRANS_DQ_BCOUNT, 1L);
	flags |= xfs_ilog_fext(whichfork);

done:
	*logflagsp = flags;
	return error;
}
924
/*
 * Called from xfs_bmap_add_attrfork to handle btree format files.
 * If the btree root still fits in the (shrunken) inode data fork area we
 * only need to relog it; otherwise grow a new btree level so the root
 * fits again.
 */
STATIC int					/* error */
xfs_bmap_add_attrfork_btree(
	xfs_trans_t		*tp,		/* transaction pointer */
	xfs_inode_t		*ip,		/* incore inode pointer */
	xfs_fsblock_t		*firstblock,	/* first block allocated */
	struct xfs_defer_ops	*dfops,		/* blocks to free at commit */
	int			*flags)		/* inode logging flags */
{
	xfs_btree_cur_t		*cur;		/* btree cursor */
	int			error;		/* error return value */
	xfs_mount_t		*mp;		/* file system mount struct */
	int			stat;		/* newroot status */

	mp = ip->i_mount;
	if (ip->i_df.if_broot_bytes <= XFS_IFORK_DSIZE(ip))
		/* root still fits inline; just relog it */
		*flags |= XFS_ILOG_DBROOT;
	else {
		cur = xfs_bmbt_init_cursor(mp, tp, ip, XFS_DATA_FORK);
		cur->bc_private.b.dfops = dfops;
		cur->bc_private.b.firstblock = *firstblock;
		error = xfs_bmbt_lookup_first(cur, &stat);
		if (error)
			goto error0;
		/* must be at least one entry */
		XFS_WANT_CORRUPTED_GOTO(mp, stat == 1, error0);
		/* push the root down a level so it shrinks to fit */
		if ((error = xfs_btree_new_iroot(cur, flags, &stat)))
			goto error0;
		if (stat == 0) {
			/* no block could be allocated for the new level */
			xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
			return -ENOSPC;
		}
		*firstblock = cur->bc_private.b.firstblock;
		cur->bc_private.b.allocated = 0;
		xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
	}
	return 0;
error0:
	xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
	return error;
}
968
969/*
970 * Called from xfs_bmap_add_attrfork to handle extents format files.
971 */
972STATIC int /* error */
973xfs_bmap_add_attrfork_extents(
974 xfs_trans_t *tp, /* transaction pointer */
975 xfs_inode_t *ip, /* incore inode pointer */
976 xfs_fsblock_t *firstblock, /* first block allocated */
Darrick J. Wong2c3234d2016-08-03 11:19:29 +1000977 struct xfs_defer_ops *dfops, /* blocks to free at commit */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700978 int *flags) /* inode logging flags */
979{
980 xfs_btree_cur_t *cur; /* bmap btree cursor */
981 int error; /* error return value */
982
983 if (ip->i_d.di_nextents * sizeof(xfs_bmbt_rec_t) <= XFS_IFORK_DSIZE(ip))
984 return 0;
985 cur = NULL;
Darrick J. Wong2c3234d2016-08-03 11:19:29 +1000986 error = xfs_bmap_extents_to_btree(tp, ip, firstblock, dfops, &cur, 0,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700987 flags, XFS_DATA_FORK);
988 if (cur) {
989 cur->bc_private.b.allocated = 0;
990 xfs_btree_del_cursor(cur,
991 error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
992 }
993 return error;
994}
995
996/*
Dave Chinner1e823792013-02-11 15:58:13 +1100997 * Called from xfs_bmap_add_attrfork to handle local format files. Each
998 * different data fork content type needs a different callout to do the
999 * conversion. Some are basic and only require special block initialisation
1000 * callouts for the data formating, others (directories) are so specialised they
1001 * handle everything themselves.
1002 *
1003 * XXX (dgc): investigate whether directory conversion can use the generic
1004 * formatting callout. It should be possible - it's just a very complex
Christoph Hellwigee1a47a2013-04-21 14:53:46 -05001005 * formatter.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001006 */
1007STATIC int /* error */
1008xfs_bmap_add_attrfork_local(
1009 xfs_trans_t *tp, /* transaction pointer */
1010 xfs_inode_t *ip, /* incore inode pointer */
1011 xfs_fsblock_t *firstblock, /* first block allocated */
Darrick J. Wong2c3234d2016-08-03 11:19:29 +10001012 struct xfs_defer_ops *dfops, /* blocks to free at commit */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001013 int *flags) /* inode logging flags */
1014{
1015 xfs_da_args_t dargs; /* args for dir/attr code */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001016
1017 if (ip->i_df.if_bytes <= XFS_IFORK_DSIZE(ip))
1018 return 0;
Dave Chinner1e823792013-02-11 15:58:13 +11001019
Dave Chinnerc19b3b052016-02-09 16:54:58 +11001020 if (S_ISDIR(VFS_I(ip)->i_mode)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001021 memset(&dargs, 0, sizeof(dargs));
Dave Chinnerd6cf1302014-06-06 15:14:11 +10001022 dargs.geo = ip->i_mount->m_dir_geo;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001023 dargs.dp = ip;
1024 dargs.firstblock = firstblock;
Darrick J. Wong2c3234d2016-08-03 11:19:29 +10001025 dargs.dfops = dfops;
Dave Chinnerd6cf1302014-06-06 15:14:11 +10001026 dargs.total = dargs.geo->fsbcount;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001027 dargs.whichfork = XFS_DATA_FORK;
1028 dargs.trans = tp;
Dave Chinner1e823792013-02-11 15:58:13 +11001029 return xfs_dir2_sf_to_block(&dargs);
1030 }
1031
Dave Chinnerc19b3b052016-02-09 16:54:58 +11001032 if (S_ISLNK(VFS_I(ip)->i_mode))
Dave Chinner1e823792013-02-11 15:58:13 +11001033 return xfs_bmap_local_to_extents(tp, ip, firstblock, 1,
1034 flags, XFS_DATA_FORK,
1035 xfs_symlink_local_to_remote);
1036
Dave Chinnerf3508bc2013-07-10 07:04:00 +10001037 /* should only be called for types that support local format data */
1038 ASSERT(0);
Dave Chinner24513372014-06-25 14:58:08 +10001039 return -EFSCORRUPTED;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001040}
1041
/*
 * Convert inode from non-attributed to attributed.
 * Must not be in a transaction, ip must not be locked.
 *
 * Reserves space, moves the data fork offset to make room for an attribute
 * fork, converts the data fork if it no longer fits, and enables the
 * superblock attr/attr2 feature bits if this is the first attr on the fs.
 */
int						/* error code */
xfs_bmap_add_attrfork(
	xfs_inode_t		*ip,		/* incore inode pointer */
	int			size,		/* space new attribute needs */
	int			rsvd)		/* xact may use reserved blks */
{
	xfs_fsblock_t		firstblock;	/* 1st block/ag allocated */
	struct xfs_defer_ops	dfops;		/* freed extent records */
	xfs_mount_t		*mp;		/* mount structure */
	xfs_trans_t		*tp;		/* transaction pointer */
	int			blks;		/* space reservation */
	int			version = 1;	/* superblock attr version */
	int			logflags;	/* logging flags */
	int			error;		/* error return value */

	ASSERT(XFS_IFORK_Q(ip) == 0);

	mp = ip->i_mount;
	ASSERT(!XFS_NOT_DQATTACHED(mp, ip));

	blks = XFS_ADDAFORK_SPACE_RES(mp);

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_addafork, blks, 0,
			rsvd ? XFS_TRANS_RESERVE : 0, &tp);
	if (error)
		return error;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	error = xfs_trans_reserve_quota_nblks(tp, ip, blks, 0, rsvd ?
			XFS_QMOPT_RES_REGBLKS | XFS_QMOPT_FORCE_RES :
			XFS_QMOPT_RES_REGBLKS);
	if (error)
		goto trans_cancel;
	/* someone may have raced in and added the fork already */
	if (XFS_IFORK_Q(ip))
		goto trans_cancel;
	/* no attr fork, so there must not be any attr extents on disk */
	if (ip->i_d.di_anextents != 0) {
		error = -EFSCORRUPTED;
		goto trans_cancel;
	}
	if (ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS) {
		/*
		 * For inodes coming from pre-6.2 filesystems.
		 */
		ASSERT(ip->i_d.di_aformat == 0);
		ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
	}

	xfs_trans_ijoin(tp, ip, 0);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	/* pick the new data/attr fork split point for this data format */
	switch (ip->i_d.di_format) {
	case XFS_DINODE_FMT_DEV:
		ip->i_d.di_forkoff = roundup(sizeof(xfs_dev_t), 8) >> 3;
		break;
	case XFS_DINODE_FMT_LOCAL:
	case XFS_DINODE_FMT_EXTENTS:
	case XFS_DINODE_FMT_BTREE:
		ip->i_d.di_forkoff = xfs_attr_shortform_bytesfit(ip, size);
		if (!ip->i_d.di_forkoff)
			ip->i_d.di_forkoff = xfs_default_attroffset(ip) >> 3;
		else if (mp->m_flags & XFS_MOUNT_ATTR2)
			version = 2;
		break;
	default:
		ASSERT(0);
		error = -EINVAL;
		goto trans_cancel;
	}

	/* create the incore attr fork in (empty) extents format */
	ASSERT(ip->i_afp == NULL);
	ip->i_afp = kmem_zone_zalloc(xfs_ifork_zone, KM_SLEEP);
	ip->i_afp->if_flags = XFS_IFEXTENTS;
	logflags = 0;
	xfs_defer_init(&dfops, &firstblock);
	/* convert the data fork if the smaller area no longer holds it */
	switch (ip->i_d.di_format) {
	case XFS_DINODE_FMT_LOCAL:
		error = xfs_bmap_add_attrfork_local(tp, ip, &firstblock, &dfops,
			&logflags);
		break;
	case XFS_DINODE_FMT_EXTENTS:
		error = xfs_bmap_add_attrfork_extents(tp, ip, &firstblock,
			&dfops, &logflags);
		break;
	case XFS_DINODE_FMT_BTREE:
		error = xfs_bmap_add_attrfork_btree(tp, ip, &firstblock, &dfops,
			&logflags);
		break;
	default:
		error = 0;
		break;
	}
	if (logflags)
		xfs_trans_log_inode(tp, ip, logflags);
	if (error)
		goto bmap_cancel;
	/* first attribute on the filesystem: flip the feature bits */
	if (!xfs_sb_version_hasattr(&mp->m_sb) ||
	   (!xfs_sb_version_hasattr2(&mp->m_sb) && version == 2)) {
		bool log_sb = false;

		spin_lock(&mp->m_sb_lock);
		if (!xfs_sb_version_hasattr(&mp->m_sb)) {
			xfs_sb_version_addattr(&mp->m_sb);
			log_sb = true;
		}
		if (!xfs_sb_version_hasattr2(&mp->m_sb) && version == 2) {
			xfs_sb_version_addattr2(&mp->m_sb);
			log_sb = true;
		}
		spin_unlock(&mp->m_sb_lock);
		if (log_sb)
			xfs_log_sb(tp);
	}

	error = xfs_defer_finish(&tp, &dfops);
	if (error)
		goto bmap_cancel;
	error = xfs_trans_commit(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;

bmap_cancel:
	xfs_defer_cancel(&dfops);
trans_cancel:
	xfs_trans_cancel(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}
1173
1174/*
1175 * Internal and external extent tree search functions.
1176 */
1177
/*
 * Read in extents from a btree-format inode.
 *
 * Walks down to the leftmost leaf of the on-disk bmap btree, then iterates
 * across the leaf level via the rightsib pointers, copying every record into
 * the incore extent tree.  On any error the partially-built incore tree is
 * torn down.  Caller must hold the ILOCK exclusively.
 */
int
xfs_iread_extents(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int			whichfork)
{
	struct xfs_mount	*mp = ip->i_mount;
	int			state = xfs_bmap_fork_to_state(whichfork);
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
	xfs_extnum_t		nextents = XFS_IFORK_NEXTENTS(ip, whichfork);
	struct xfs_btree_block	*block = ifp->if_broot;
	struct xfs_iext_cursor	icur;
	struct xfs_bmbt_irec	new;
	xfs_fsblock_t		bno;
	struct xfs_buf		*bp;
	xfs_extnum_t		i, j;
	int			level;
	__be64			*pp;
	int			error;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

	if (unlikely(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)) {
		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp);
		return -EFSCORRUPTED;
	}

	/*
	 * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
	 */
	level = be16_to_cpu(block->bb_level);
	ASSERT(level > 0);
	pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
	bno = be64_to_cpu(*pp);

	/*
	 * Go down the tree until leaf level is reached, following the first
	 * pointer (leftmost) at each level.
	 */
	while (level-- > 0) {
		error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp,
				XFS_BMAP_BTREE_REF, &xfs_bmbt_buf_ops);
		if (error)
			goto out;
		block = XFS_BUF_TO_BLOCK(bp);
		if (level == 0)
			break;
		pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
		bno = be64_to_cpu(*pp);
		/* sanity-check the child pointer before following it */
		XFS_WANT_CORRUPTED_GOTO(mp,
			xfs_verify_fsbno(mp, bno), out_brelse);
		xfs_trans_brelse(tp, bp);
	}

	/*
	 * Here with bp and block set to the leftmost leaf node in the tree.
	 */
	i = 0;
	xfs_iext_first(ifp, &icur);

	/*
	 * Loop over all leaf nodes. Copy information to the extent records.
	 */
	for (;;) {
		xfs_bmbt_rec_t	*frp;
		xfs_fsblock_t	nextbno;
		xfs_extnum_t	num_recs;

		/* more records than the inode claims means corruption */
		num_recs = xfs_btree_get_numrecs(block);
		if (unlikely(i + num_recs > nextents)) {
			ASSERT(i + num_recs <= nextents);
			xfs_warn(ip->i_mount,
				"corrupt dinode %Lu, (btree extents).",
				(unsigned long long) ip->i_ino);
			xfs_inode_verifier_error(ip, -EFSCORRUPTED,
					__func__, block, sizeof(*block),
					__this_address);
			error = -EFSCORRUPTED;
			goto out_brelse;
		}
		/*
		 * Read-ahead the next leaf block, if any.
		 */
		nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
		if (nextbno != NULLFSBLOCK)
			xfs_btree_reada_bufl(mp, nextbno, 1,
					     &xfs_bmbt_buf_ops);
		/*
		 * Copy records into the extent records.
		 */
		frp = XFS_BMBT_REC_ADDR(mp, block, 1);
		for (j = 0; j < num_recs; j++, frp++, i++) {
			xfs_failaddr_t	fa;

			xfs_bmbt_disk_get_all(frp, &new);
			/* validate each record before trusting it incore */
			fa = xfs_bmap_validate_extent(ip, whichfork, &new);
			if (fa) {
				error = -EFSCORRUPTED;
				xfs_inode_verifier_error(ip, error,
						"xfs_iread_extents(2)",
						frp, sizeof(*frp), fa);
				goto out_brelse;
			}
			xfs_iext_insert(ip, &icur, &new, state);
			trace_xfs_read_extent(ip, &icur, state, _THIS_IP_);
			xfs_iext_next(ifp, &icur);
		}
		xfs_trans_brelse(tp, bp);
		bno = nextbno;
		/*
		 * If we've reached the end, stop.
		 */
		if (bno == NULLFSBLOCK)
			break;
		error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp,
				XFS_BMAP_BTREE_REF, &xfs_bmbt_buf_ops);
		if (error)
			goto out;
		block = XFS_BUF_TO_BLOCK(bp);
	}

	/* the walk must have produced exactly the advertised extent count */
	if (i != XFS_IFORK_NEXTENTS(ip, whichfork)) {
		error = -EFSCORRUPTED;
		goto out;
	}
	ASSERT(i == xfs_iext_count(ifp));

	ifp->if_flags |= XFS_IFEXTENTS;
	return 0;

out_brelse:
	xfs_trans_brelse(tp, bp);
out:
	/* tear down whatever incore extents we managed to build */
	xfs_iext_destroy(ifp);
	return error;
}
1317
Dave Chinner9e5987a72013-02-25 12:31:26 +11001318/*
Christoph Hellwig29b3e942017-10-19 11:08:52 -07001319 * Returns the relative block number of the first unused block(s) in the given
1320 * fork with at least "len" logically contiguous blocks free. This is the
1321 * lowest-address hole if the fork has holes, else the first block past the end
1322 * of fork. Return 0 if the fork is currently local (in-inode).
Dave Chinner9e5987a72013-02-25 12:31:26 +11001323 */
1324int /* error */
1325xfs_bmap_first_unused(
Christoph Hellwig29b3e942017-10-19 11:08:52 -07001326 struct xfs_trans *tp, /* transaction pointer */
1327 struct xfs_inode *ip, /* incore inode */
1328 xfs_extlen_t len, /* size of hole to find */
1329 xfs_fileoff_t *first_unused, /* unused block */
1330 int whichfork) /* data or attr fork */
Dave Chinner9e5987a72013-02-25 12:31:26 +11001331{
Christoph Hellwig29b3e942017-10-19 11:08:52 -07001332 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
1333 struct xfs_bmbt_irec got;
Christoph Hellwigb2b17122017-11-03 10:34:43 -07001334 struct xfs_iext_cursor icur;
Christoph Hellwig29b3e942017-10-19 11:08:52 -07001335 xfs_fileoff_t lastaddr = 0;
1336 xfs_fileoff_t lowest, max;
1337 int error;
Dave Chinner9e5987a72013-02-25 12:31:26 +11001338
1339 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE ||
1340 XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS ||
1341 XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL);
Christoph Hellwig29b3e942017-10-19 11:08:52 -07001342
Dave Chinner9e5987a72013-02-25 12:31:26 +11001343 if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) {
1344 *first_unused = 0;
1345 return 0;
1346 }
Christoph Hellwigf2285c12017-08-29 15:44:12 -07001347
Christoph Hellwig29b3e942017-10-19 11:08:52 -07001348 if (!(ifp->if_flags & XFS_IFEXTENTS)) {
1349 error = xfs_iread_extents(tp, ip, whichfork);
1350 if (error)
1351 return error;
1352 }
Christoph Hellwigf2285c12017-08-29 15:44:12 -07001353
Christoph Hellwig29b3e942017-10-19 11:08:52 -07001354 lowest = max = *first_unused;
Christoph Hellwigb2b17122017-11-03 10:34:43 -07001355 for_each_xfs_iext(ifp, &icur, &got) {
Dave Chinner9e5987a72013-02-25 12:31:26 +11001356 /*
1357 * See if the hole before this extent will work.
1358 */
Christoph Hellwigf2285c12017-08-29 15:44:12 -07001359 if (got.br_startoff >= lowest + len &&
Christoph Hellwig29b3e942017-10-19 11:08:52 -07001360 got.br_startoff - max >= len)
1361 break;
Christoph Hellwigf2285c12017-08-29 15:44:12 -07001362 lastaddr = got.br_startoff + got.br_blockcount;
Dave Chinner9e5987a72013-02-25 12:31:26 +11001363 max = XFS_FILEOFF_MAX(lastaddr, lowest);
1364 }
Christoph Hellwig29b3e942017-10-19 11:08:52 -07001365
Dave Chinner9e5987a72013-02-25 12:31:26 +11001366 *first_unused = max;
1367 return 0;
1368}
1369
1370/*
Zhi Yong Wu02bb4872013-08-12 03:14:54 +00001371 * Returns the file-relative block number of the last block - 1 before
Dave Chinner9e5987a72013-02-25 12:31:26 +11001372 * last_block (input value) in the file.
1373 * This is not based on i_size, it is based on the extent records.
1374 * Returns 0 for local files, as they do not have extent records.
1375 */
1376int /* error */
1377xfs_bmap_last_before(
Christoph Hellwig86685f72016-11-24 11:39:38 +11001378 struct xfs_trans *tp, /* transaction pointer */
1379 struct xfs_inode *ip, /* incore inode */
1380 xfs_fileoff_t *last_block, /* last block */
1381 int whichfork) /* data or attr fork */
Dave Chinner9e5987a72013-02-25 12:31:26 +11001382{
Christoph Hellwig86685f72016-11-24 11:39:38 +11001383 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
1384 struct xfs_bmbt_irec got;
Christoph Hellwigb2b17122017-11-03 10:34:43 -07001385 struct xfs_iext_cursor icur;
Christoph Hellwig86685f72016-11-24 11:39:38 +11001386 int error;
Dave Chinner9e5987a72013-02-25 12:31:26 +11001387
Christoph Hellwig86685f72016-11-24 11:39:38 +11001388 switch (XFS_IFORK_FORMAT(ip, whichfork)) {
1389 case XFS_DINODE_FMT_LOCAL:
Dave Chinner9e5987a72013-02-25 12:31:26 +11001390 *last_block = 0;
1391 return 0;
Christoph Hellwig86685f72016-11-24 11:39:38 +11001392 case XFS_DINODE_FMT_BTREE:
1393 case XFS_DINODE_FMT_EXTENTS:
1394 break;
1395 default:
1396 return -EIO;
Dave Chinner9e5987a72013-02-25 12:31:26 +11001397 }
Christoph Hellwig86685f72016-11-24 11:39:38 +11001398
1399 if (!(ifp->if_flags & XFS_IFEXTENTS)) {
1400 error = xfs_iread_extents(tp, ip, whichfork);
1401 if (error)
1402 return error;
Dave Chinner9e5987a72013-02-25 12:31:26 +11001403 }
Christoph Hellwig86685f72016-11-24 11:39:38 +11001404
Christoph Hellwigb2b17122017-11-03 10:34:43 -07001405 if (!xfs_iext_lookup_extent_before(ip, ifp, last_block, &icur, &got))
Christoph Hellwigdc560152017-10-23 16:32:39 -07001406 *last_block = 0;
Dave Chinner9e5987a72013-02-25 12:31:26 +11001407 return 0;
1408}
1409
Dave Chinner68988112013-08-12 20:49:42 +10001410int
Dave Chinner9e5987a72013-02-25 12:31:26 +11001411xfs_bmap_last_extent(
1412 struct xfs_trans *tp,
1413 struct xfs_inode *ip,
1414 int whichfork,
1415 struct xfs_bmbt_irec *rec,
1416 int *is_empty)
1417{
1418 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
Christoph Hellwigb2b17122017-11-03 10:34:43 -07001419 struct xfs_iext_cursor icur;
Dave Chinner9e5987a72013-02-25 12:31:26 +11001420 int error;
Dave Chinner9e5987a72013-02-25 12:31:26 +11001421
1422 if (!(ifp->if_flags & XFS_IFEXTENTS)) {
1423 error = xfs_iread_extents(tp, ip, whichfork);
1424 if (error)
1425 return error;
1426 }
1427
Christoph Hellwigb2b17122017-11-03 10:34:43 -07001428 xfs_iext_last(ifp, &icur);
1429 if (!xfs_iext_get_extent(ifp, &icur, rec))
Dave Chinner9e5987a72013-02-25 12:31:26 +11001430 *is_empty = 1;
Christoph Hellwigb2b17122017-11-03 10:34:43 -07001431 else
1432 *is_empty = 0;
Dave Chinner9e5987a72013-02-25 12:31:26 +11001433 return 0;
1434}
1435
1436/*
1437 * Check the last inode extent to determine whether this allocation will result
1438 * in blocks being allocated at the end of the file. When we allocate new data
1439 * blocks at the end of the file which do not start at the previous data block,
1440 * we will try to align the new blocks at stripe unit boundaries.
1441 *
Dave Chinner6e708bc2013-11-22 10:41:16 +11001442 * Returns 1 in bma->aeof if the file (fork) is empty as any new write will be
Dave Chinner9e5987a72013-02-25 12:31:26 +11001443 * at, or past the EOF.
1444 */
1445STATIC int
1446xfs_bmap_isaeof(
1447 struct xfs_bmalloca *bma,
1448 int whichfork)
1449{
1450 struct xfs_bmbt_irec rec;
1451 int is_empty;
1452 int error;
1453
Thomas Meyer749f24f2017-10-09 11:38:54 -07001454 bma->aeof = false;
Dave Chinner9e5987a72013-02-25 12:31:26 +11001455 error = xfs_bmap_last_extent(NULL, bma->ip, whichfork, &rec,
1456 &is_empty);
Dave Chinner6e708bc2013-11-22 10:41:16 +11001457 if (error)
Dave Chinner9e5987a72013-02-25 12:31:26 +11001458 return error;
1459
Dave Chinner6e708bc2013-11-22 10:41:16 +11001460 if (is_empty) {
Thomas Meyer749f24f2017-10-09 11:38:54 -07001461 bma->aeof = true;
Dave Chinner6e708bc2013-11-22 10:41:16 +11001462 return 0;
1463 }
1464
Dave Chinner9e5987a72013-02-25 12:31:26 +11001465 /*
1466 * Check if we are allocation or past the last extent, or at least into
1467 * the last delayed allocated extent.
1468 */
1469 bma->aeof = bma->offset >= rec.br_startoff + rec.br_blockcount ||
1470 (bma->offset >= rec.br_startoff &&
1471 isnullstartblock(rec.br_startblock));
1472 return 0;
1473}
1474
1475/*
Dave Chinner9e5987a72013-02-25 12:31:26 +11001476 * Returns the file-relative block number of the first block past eof in
1477 * the file. This is not based on i_size, it is based on the extent records.
1478 * Returns 0 for local files, as they do not have extent records.
1479 */
1480int
1481xfs_bmap_last_offset(
Dave Chinner9e5987a72013-02-25 12:31:26 +11001482 struct xfs_inode *ip,
1483 xfs_fileoff_t *last_block,
1484 int whichfork)
1485{
1486 struct xfs_bmbt_irec rec;
1487 int is_empty;
1488 int error;
1489
1490 *last_block = 0;
1491
1492 if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL)
1493 return 0;
1494
1495 if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE &&
1496 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
Dave Chinner24513372014-06-25 14:58:08 +10001497 return -EIO;
Dave Chinner9e5987a72013-02-25 12:31:26 +11001498
1499 error = xfs_bmap_last_extent(NULL, ip, whichfork, &rec, &is_empty);
1500 if (error || is_empty)
1501 return error;
1502
1503 *last_block = rec.br_startoff + rec.br_blockcount;
1504 return 0;
1505}
1506
1507/*
1508 * Returns whether the selected fork of the inode has exactly one
1509 * block or not. For the data fork we check this matches di_size,
1510 * implying the file's range is 0..bsize-1.
1511 */
1512int /* 1=>1 block, 0=>otherwise */
1513xfs_bmap_one_block(
1514 xfs_inode_t *ip, /* incore inode */
1515 int whichfork) /* data or attr fork */
1516{
Dave Chinner9e5987a72013-02-25 12:31:26 +11001517 xfs_ifork_t *ifp; /* inode fork pointer */
1518 int rval; /* return value */
1519 xfs_bmbt_irec_t s; /* internal version of extent */
Christoph Hellwigb2b17122017-11-03 10:34:43 -07001520 struct xfs_iext_cursor icur;
Dave Chinner9e5987a72013-02-25 12:31:26 +11001521
1522#ifndef DEBUG
1523 if (whichfork == XFS_DATA_FORK)
1524 return XFS_ISIZE(ip) == ip->i_mount->m_sb.sb_blocksize;
1525#endif /* !DEBUG */
1526 if (XFS_IFORK_NEXTENTS(ip, whichfork) != 1)
1527 return 0;
1528 if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
1529 return 0;
1530 ifp = XFS_IFORK_PTR(ip, whichfork);
1531 ASSERT(ifp->if_flags & XFS_IFEXTENTS);
Christoph Hellwigb2b17122017-11-03 10:34:43 -07001532 xfs_iext_first(ifp, &icur);
1533 xfs_iext_get_extent(ifp, &icur, &s);
Dave Chinner9e5987a72013-02-25 12:31:26 +11001534 rval = s.br_startoff == 0 && s.br_blockcount == 1;
1535 if (rval && whichfork == XFS_DATA_FORK)
1536 ASSERT(XFS_ISIZE(ip) == ip->i_mount->m_sb.sb_blocksize);
1537 return rval;
1538}
1539
1540/*
1541 * Extent tree manipulation functions used during allocation.
1542 */
1543
/*
 * Convert a delayed allocation to a real allocation.
 *
 * PREV is the delayed-allocation extent under bma->icur; "new" (bma->got) is
 * the real allocation replacing all or part of it.  The new extent is merged
 * with contiguous real neighbors where possible, the incore extent list and
 * (if present) the bmap btree are updated, and the delayed-allocation block
 * reservation is adjusted for any indirect blocks no longer needed.
 */
STATIC int				/* error */
xfs_bmap_add_extent_delay_real(
	struct xfs_bmalloca	*bma,
	int			whichfork)
{
	struct xfs_bmbt_irec	*new = &bma->got;
	int			error;	/* error return value */
	int			i;	/* temp state */
	xfs_ifork_t		*ifp;	/* inode fork pointer */
	xfs_fileoff_t		new_endoff;	/* end offset of new entry */
	xfs_bmbt_irec_t		r[3];	/* neighbor extent entries */
					/* left is 0, right is 1, prev is 2 */
	int			rval=0;	/* return value (logging flags) */
	int			state = xfs_bmap_fork_to_state(whichfork);
	xfs_filblks_t		da_new; /* new count del alloc blocks used */
	xfs_filblks_t		da_old; /* old count del alloc blocks used */
	xfs_filblks_t		temp=0;	/* value for da_new calculations */
	int			tmp_rval;	/* partial logging flags */
	struct xfs_mount	*mp;
	xfs_extnum_t		*nextents;
	struct xfs_bmbt_irec	old;

	mp = bma->ip->i_mount;
	ifp = XFS_IFORK_PTR(bma->ip, whichfork);
	ASSERT(whichfork != XFS_ATTR_FORK);
	/* COW fork extents are counted separately from the data fork. */
	nextents = (whichfork == XFS_COW_FORK ? &bma->ip->i_cnextents :
						&bma->ip->i_d.di_nextents);

	ASSERT(!isnullstartblock(new->br_startblock));
	ASSERT(!bma->cur ||
	       (bma->cur->bc_private.b.flags & XFS_BTCUR_BPRV_WASDEL));

	XFS_STATS_INC(mp, xs_add_exlist);

#define	LEFT		r[0]
#define	RIGHT		r[1]
#define	PREV		r[2]

	/*
	 * Set up a bunch of variables to make the tests simpler.
	 */
	xfs_iext_get_extent(ifp, &bma->icur, &PREV);
	new_endoff = new->br_startoff + new->br_blockcount;
	/* The new real extent must lie entirely within the delalloc extent. */
	ASSERT(isnullstartblock(PREV.br_startblock));
	ASSERT(PREV.br_startoff <= new->br_startoff);
	ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff);

	/*
	 * da_old is the indirect-block reservation encoded in the delalloc
	 * startblock; da_new accumulates what remains reserved afterwards.
	 */
	da_old = startblockval(PREV.br_startblock);
	da_new = 0;

	/*
	 * Set flags determining what part of the previous delayed allocation
	 * extent is being replaced by a real allocation.
	 */
	if (PREV.br_startoff == new->br_startoff)
		state |= BMAP_LEFT_FILLING;
	if (PREV.br_startoff + PREV.br_blockcount == new_endoff)
		state |= BMAP_RIGHT_FILLING;

	/*
	 * Check and set flags if this segment has a left neighbor.
	 * Don't set contiguous if the combined extent would be too large.
	 */
	if (xfs_iext_peek_prev_extent(ifp, &bma->icur, &LEFT)) {
		state |= BMAP_LEFT_VALID;
		if (isnullstartblock(LEFT.br_startblock))
			state |= BMAP_LEFT_DELAY;
	}

	if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
	    LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff &&
	    LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock &&
	    LEFT.br_state == new->br_state &&
	    LEFT.br_blockcount + new->br_blockcount <= MAXEXTLEN)
		state |= BMAP_LEFT_CONTIG;

	/*
	 * Check and set flags if this segment has a right neighbor.
	 * Don't set contiguous if the combined extent would be too large.
	 * Also check for all-three-contiguous being too large.
	 */
	if (xfs_iext_peek_next_extent(ifp, &bma->icur, &RIGHT)) {
		state |= BMAP_RIGHT_VALID;
		if (isnullstartblock(RIGHT.br_startblock))
			state |= BMAP_RIGHT_DELAY;
	}

	if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
	    new_endoff == RIGHT.br_startoff &&
	    new->br_startblock + new->br_blockcount == RIGHT.br_startblock &&
	    new->br_state == RIGHT.br_state &&
	    new->br_blockcount + RIGHT.br_blockcount <= MAXEXTLEN &&
	    ((state & (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
		       BMAP_RIGHT_FILLING)) !=
		      (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
		       BMAP_RIGHT_FILLING) ||
	     LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount
			<= MAXEXTLEN))
		state |= BMAP_RIGHT_CONTIG;

	error = 0;
	/*
	 * Switch out based on the FILLING and CONTIG state bits.  Each case
	 * updates the incore extent list via the iext cursor and mirrors the
	 * change into the bmap btree when a cursor (bma->cur) is present.
	 */
	switch (state & (BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
			 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG)) {
	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
	     BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
		/*
		 * Filling in all of a previously delayed allocation extent.
		 * The left and right neighbors are both contiguous with new.
		 * All three records collapse into a single extent (LEFT).
		 */
		LEFT.br_blockcount += PREV.br_blockcount + RIGHT.br_blockcount;

		xfs_iext_remove(bma->ip, &bma->icur, state);
		xfs_iext_remove(bma->ip, &bma->icur, state);
		xfs_iext_prev(ifp, &bma->icur);
		xfs_iext_update_extent(bma->ip, state, &bma->icur, &LEFT);
		(*nextents)--;

		if (bma->cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			/* Delete RIGHT from the btree, then grow LEFT. */
			error = xfs_bmbt_lookup_eq(bma->cur, &RIGHT, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			error = xfs_btree_delete(bma->cur, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			error = xfs_btree_decrement(bma->cur, 0, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			error = xfs_bmbt_update(bma->cur, &LEFT);
			if (error)
				goto done;
		}
		break;

	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
		/*
		 * Filling in all of a previously delayed allocation extent.
		 * The left neighbor is contiguous, the right is not.
		 */
		old = LEFT;
		LEFT.br_blockcount += PREV.br_blockcount;

		xfs_iext_remove(bma->ip, &bma->icur, state);
		xfs_iext_prev(ifp, &bma->icur);
		xfs_iext_update_extent(bma->ip, state, &bma->icur, &LEFT);

		if (bma->cur == NULL)
			rval = XFS_ILOG_DEXT;
		else {
			rval = 0;
			error = xfs_bmbt_lookup_eq(bma->cur, &old, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			error = xfs_bmbt_update(bma->cur, &LEFT);
			if (error)
				goto done;
		}
		break;

	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
		/*
		 * Filling in all of a previously delayed allocation extent.
		 * The right neighbor is contiguous, the left is not.
		 */
		PREV.br_startblock = new->br_startblock;
		PREV.br_blockcount += RIGHT.br_blockcount;

		xfs_iext_next(ifp, &bma->icur);
		xfs_iext_remove(bma->ip, &bma->icur, state);
		xfs_iext_prev(ifp, &bma->icur);
		xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);

		if (bma->cur == NULL)
			rval = XFS_ILOG_DEXT;
		else {
			rval = 0;
			error = xfs_bmbt_lookup_eq(bma->cur, &RIGHT, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			error = xfs_bmbt_update(bma->cur, &PREV);
			if (error)
				goto done;
		}
		break;

	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
		/*
		 * Filling in all of a previously delayed allocation extent.
		 * Neither the left nor right neighbors are contiguous with
		 * the new one.
		 */
		PREV.br_startblock = new->br_startblock;
		PREV.br_state = new->br_state;
		xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);

		(*nextents)++;
		if (bma->cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			/* Record must not already exist: expect i == 0. */
			error = xfs_bmbt_lookup_eq(bma->cur, new, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
			error = xfs_btree_insert(bma->cur, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
		}
		break;

	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG:
		/*
		 * Filling in the first part of a previous delayed allocation.
		 * The left neighbor is contiguous.
		 */
		old = LEFT;
		temp = PREV.br_blockcount - new->br_blockcount;
		/* Re-reserve worst-case indirect blocks for the remainder. */
		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
				startblockval(PREV.br_startblock));

		LEFT.br_blockcount += new->br_blockcount;

		PREV.br_blockcount = temp;
		PREV.br_startoff += new->br_blockcount;
		PREV.br_startblock = nullstartblock(da_new);

		xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);
		xfs_iext_prev(ifp, &bma->icur);
		xfs_iext_update_extent(bma->ip, state, &bma->icur, &LEFT);

		if (bma->cur == NULL)
			rval = XFS_ILOG_DEXT;
		else {
			rval = 0;
			error = xfs_bmbt_lookup_eq(bma->cur, &old, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			error = xfs_bmbt_update(bma->cur, &LEFT);
			if (error)
				goto done;
		}
		break;

	case BMAP_LEFT_FILLING:
		/*
		 * Filling in the first part of a previous delayed allocation.
		 * The left neighbor is not contiguous.
		 */
		xfs_iext_update_extent(bma->ip, state, &bma->icur, new);
		(*nextents)++;
		if (bma->cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(bma->cur, new, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
			error = xfs_btree_insert(bma->cur, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
		}

		if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
			error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
					bma->firstblock, bma->dfops,
					&bma->cur, 1, &tmp_rval, whichfork);
			rval |= tmp_rval;
			if (error)
				goto done;
		}

		temp = PREV.br_blockcount - new->br_blockcount;
		/*
		 * Blocks the btree conversion consumed come out of the old
		 * reservation before recomputing the remainder.
		 */
		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
			startblockval(PREV.br_startblock) -
			(bma->cur ? bma->cur->bc_private.b.allocated : 0));

		PREV.br_startoff = new_endoff;
		PREV.br_blockcount = temp;
		PREV.br_startblock = nullstartblock(da_new);
		xfs_iext_next(ifp, &bma->icur);
		xfs_iext_insert(bma->ip, &bma->icur, &PREV, state);
		xfs_iext_prev(ifp, &bma->icur);
		break;

	case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
		/*
		 * Filling in the last part of a previous delayed allocation.
		 * The right neighbor is contiguous with the new allocation.
		 */
		old = RIGHT;
		RIGHT.br_startoff = new->br_startoff;
		RIGHT.br_startblock = new->br_startblock;
		RIGHT.br_blockcount += new->br_blockcount;

		if (bma->cur == NULL)
			rval = XFS_ILOG_DEXT;
		else {
			rval = 0;
			error = xfs_bmbt_lookup_eq(bma->cur, &old, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			error = xfs_bmbt_update(bma->cur, &RIGHT);
			if (error)
				goto done;
		}

		temp = PREV.br_blockcount - new->br_blockcount;
		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
			startblockval(PREV.br_startblock));

		PREV.br_blockcount = temp;
		PREV.br_startblock = nullstartblock(da_new);

		xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);
		xfs_iext_next(ifp, &bma->icur);
		xfs_iext_update_extent(bma->ip, state, &bma->icur, &RIGHT);
		break;

	case BMAP_RIGHT_FILLING:
		/*
		 * Filling in the last part of a previous delayed allocation.
		 * The right neighbor is not contiguous.
		 */
		xfs_iext_update_extent(bma->ip, state, &bma->icur, new);
		(*nextents)++;
		if (bma->cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(bma->cur, new, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
			error = xfs_btree_insert(bma->cur, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
		}

		if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
			error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
				bma->firstblock, bma->dfops, &bma->cur, 1,
				&tmp_rval, whichfork);
			rval |= tmp_rval;
			if (error)
				goto done;
		}

		temp = PREV.br_blockcount - new->br_blockcount;
		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
			startblockval(PREV.br_startblock) -
			(bma->cur ? bma->cur->bc_private.b.allocated : 0));

		PREV.br_startblock = nullstartblock(da_new);
		PREV.br_blockcount = temp;
		xfs_iext_insert(bma->ip, &bma->icur, &PREV, state);
		xfs_iext_next(ifp, &bma->icur);
		break;

	case 0:
		/*
		 * Filling in the middle part of a previous delayed allocation.
		 * Contiguity is impossible here.
		 * This case is avoided almost all the time.
		 *
		 * We start with a delayed allocation:
		 *
		 * +ddddddddddddddddddddddddddddddddddddddddddddddddddddddd+
		 *  PREV @ idx
		 *
		 * and we are allocating:
		 *                     +rrrrrrrrrrrrrrrrr+
		 *			      new
		 *
		 * and we set it up for insertion as:
		 * +ddddddddddddddddddd+rrrrrrrrrrrrrrrrr+ddddddddddddddddd+
		 *                            new
		 *  PREV @ idx          LEFT              RIGHT
		 *                      inserted at idx + 1
		 */
		old = PREV;

		/* LEFT is the new middle */
		LEFT = *new;

		/* RIGHT is the new right */
		RIGHT.br_state = PREV.br_state;
		RIGHT.br_startoff = new_endoff;
		RIGHT.br_blockcount =
			PREV.br_startoff + PREV.br_blockcount - new_endoff;
		RIGHT.br_startblock =
			nullstartblock(xfs_bmap_worst_indlen(bma->ip,
					RIGHT.br_blockcount));

		/* truncate PREV */
		PREV.br_blockcount = new->br_startoff - PREV.br_startoff;
		PREV.br_startblock =
			nullstartblock(xfs_bmap_worst_indlen(bma->ip,
					PREV.br_blockcount));
		xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);

		xfs_iext_next(ifp, &bma->icur);
		xfs_iext_insert(bma->ip, &bma->icur, &RIGHT, state);
		xfs_iext_insert(bma->ip, &bma->icur, &LEFT, state);
		(*nextents)++;

		if (bma->cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(bma->cur, new, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
			error = xfs_btree_insert(bma->cur, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
		}

		if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
			error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
					bma->firstblock, bma->dfops, &bma->cur,
					1, &tmp_rval, whichfork);
			rval |= tmp_rval;
			if (error)
				goto done;
		}

		/* Both remaining delalloc pieces keep their reservations. */
		da_new = startblockval(PREV.br_startblock) +
			 startblockval(RIGHT.br_startblock);
		break;

	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
	case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
	case BMAP_LEFT_FILLING | BMAP_RIGHT_CONTIG:
	case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
	case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
	case BMAP_LEFT_CONTIG:
	case BMAP_RIGHT_CONTIG:
		/*
		 * These cases are all impossible.
		 */
		ASSERT(0);
	}

	/* add reverse mapping unless caller opted out */
	if (!(bma->flags & XFS_BMAPI_NORMAP)) {
		error = xfs_rmap_map_extent(mp, bma->dfops, bma->ip,
				whichfork, new);
		if (error)
			goto done;
	}

	/* convert to a btree if necessary */
	if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
		int	tmp_logflags;	/* partial log flag return val */

		ASSERT(bma->cur == NULL);
		error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
				bma->firstblock, bma->dfops, &bma->cur,
				da_old > 0, &tmp_logflags, whichfork);
		bma->logflags |= tmp_logflags;
		if (error)
			goto done;
	}

	/* Blocks the btree cursor allocated count against the reservation. */
	if (bma->cur) {
		da_new += bma->cur->bc_private.b.allocated;
		bma->cur->bc_private.b.allocated = 0;
	}

	/* adjust for changes in reserved delayed indirect blocks */
	if (da_new != da_old) {
		ASSERT(state == 0 || da_new < da_old);
		error = xfs_mod_fdblocks(mp, (int64_t)(da_old - da_new),
				false);
	}

	xfs_bmap_check_leaf_extents(bma->cur, bma->ip, whichfork);
done:
	/* COW fork updates are not logged via the inode logging flags. */
	if (whichfork != XFS_COW_FORK)
		bma->logflags |= rval;
	return error;
#undef	LEFT
#undef	RIGHT
#undef	PREV
}
2050
/*
 * Convert an unwritten allocation to a real allocation or vice versa.
 *
 * The extent described by @new must lie entirely within the extent that
 * the iext cursor @icur currently points at (called PREV below), and must
 * have the opposite br_state — both facts are asserted on entry.  Depending
 * on whether @new fills the start, the end, the middle, or all of PREV, and
 * on whether the converted piece is physically contiguous with the left
 * and/or right neighbor extents, one of nine cases below rewrites the
 * in-core extent list and, if the fork is in btree format (*curp != NULL),
 * the on-disk bmap btree to match.
 *
 * Returns 0 on success or an error value.  *logflagsp is or'ed with the
 * inode logging flags the caller must apply to the transaction; it is
 * valid even on the error paths reached via the done: label.
 */
STATIC int				/* error */
xfs_bmap_add_extent_unwritten_real(
	struct xfs_trans	*tp,
	xfs_inode_t		*ip,	/* incore inode pointer */
	int			whichfork,
	struct xfs_iext_cursor	*icur,
	xfs_btree_cur_t		**curp,	/* if *curp is null, not a btree */
	xfs_bmbt_irec_t		*new,	/* new data to add to file extents */
	xfs_fsblock_t		*first,	/* pointer to firstblock variable */
	struct xfs_defer_ops	*dfops,	/* list of extents to be freed */
	int			*logflagsp) /* inode logging flags */
{
	xfs_btree_cur_t		*cur;	/* btree cursor */
	int			error;	/* error return value */
	int			i;	/* temp state */
	xfs_ifork_t		*ifp;	/* inode fork pointer */
	xfs_fileoff_t		new_endoff;	/* end offset of new entry */
	xfs_bmbt_irec_t		r[3];	/* neighbor extent entries */
					/* left is 0, right is 1, prev is 2 */
	int			rval=0;	/* return value (logging flags) */
	int			state = xfs_bmap_fork_to_state(whichfork);
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_bmbt_irec	old;	/* pre-modification copy of PREV,
					   used as the btree lookup key */

	*logflagsp = 0;

	cur = *curp;
	ifp = XFS_IFORK_PTR(ip, whichfork);

	/* the extent being converted must be a real (allocated) extent */
	ASSERT(!isnullstartblock(new->br_startblock));

	XFS_STATS_INC(mp, xs_add_exlist);

/* readable aliases into r[]; #undef'ed at the end of the function */
#define	LEFT		r[0]
#define	RIGHT		r[1]
#define	PREV		r[2]

	/*
	 * Set up a bunch of variables to make the tests simpler.
	 */
	error = 0;
	xfs_iext_get_extent(ifp, icur, &PREV);
	ASSERT(new->br_state != PREV.br_state);
	new_endoff = new->br_startoff + new->br_blockcount;
	/* new must be wholly contained within PREV */
	ASSERT(PREV.br_startoff <= new->br_startoff);
	ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff);

	/*
	 * Set flags determining what part of the previous oldext allocation
	 * extent is being replaced by a newext allocation.
	 * LEFT_FILLING: new starts where PREV starts.
	 * RIGHT_FILLING: new ends where PREV ends.
	 * Both set means all of PREV is being converted.
	 */
	if (PREV.br_startoff == new->br_startoff)
		state |= BMAP_LEFT_FILLING;
	if (PREV.br_startoff + PREV.br_blockcount == new_endoff)
		state |= BMAP_RIGHT_FILLING;

	/*
	 * Check and set flags if this segment has a left neighbor.
	 * Don't set contiguous if the combined extent would be too large.
	 */
	if (xfs_iext_peek_prev_extent(ifp, icur, &LEFT)) {
		state |= BMAP_LEFT_VALID;
		if (isnullstartblock(LEFT.br_startblock))
			state |= BMAP_LEFT_DELAY;
	}

	/*
	 * LEFT is mergeable only if it is a real extent that abuts new in
	 * both file offset and disk block space, already has new's target
	 * state, and the merge would not exceed MAXEXTLEN.
	 */
	if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
	    LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff &&
	    LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock &&
	    LEFT.br_state == new->br_state &&
	    LEFT.br_blockcount + new->br_blockcount <= MAXEXTLEN)
		state |= BMAP_LEFT_CONTIG;

	/*
	 * Check and set flags if this segment has a right neighbor.
	 * Don't set contiguous if the combined extent would be too large.
	 * Also check for all-three-contiguous being too large.
	 */
	if (xfs_iext_peek_next_extent(ifp, icur, &RIGHT)) {
		state |= BMAP_RIGHT_VALID;
		if (isnullstartblock(RIGHT.br_startblock))
			state |= BMAP_RIGHT_DELAY;
	}

	/*
	 * Same mergeability test for RIGHT, plus: when all of PREV is being
	 * converted and LEFT is also contiguous, the three-way merge
	 * LEFT+new+RIGHT must itself fit within MAXEXTLEN.
	 */
	if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
	    new_endoff == RIGHT.br_startoff &&
	    new->br_startblock + new->br_blockcount == RIGHT.br_startblock &&
	    new->br_state == RIGHT.br_state &&
	    new->br_blockcount + RIGHT.br_blockcount <= MAXEXTLEN &&
	    ((state & (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
		       BMAP_RIGHT_FILLING)) !=
		      (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
		       BMAP_RIGHT_FILLING) ||
	     LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount
			<= MAXEXTLEN))
		state |= BMAP_RIGHT_CONTIG;

	/*
	 * Switch out based on the FILLING and CONTIG state bits.
	 */
	switch (state & (BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
			 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG)) {
	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
	     BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
		/*
		 * Setting all of a previous oldext extent to newext.
		 * The left and right neighbors are both contiguous with new.
		 * Three extents collapse into one (LEFT); extent count -2.
		 */
		LEFT.br_blockcount += PREV.br_blockcount + RIGHT.br_blockcount;

		/* drop PREV and RIGHT from the in-core list, grow LEFT */
		xfs_iext_remove(ip, icur, state);
		xfs_iext_remove(ip, icur, state);
		xfs_iext_prev(ifp, icur);
		xfs_iext_update_extent(ip, state, icur, &LEFT);
		XFS_IFORK_NEXT_SET(ip, whichfork,
				XFS_IFORK_NEXTENTS(ip, whichfork) - 2);
		if (cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			/*
			 * Btree: delete the RIGHT and PREV records (stepping
			 * back between deletes), then rewrite the LEFT record
			 * as the merged extent.  Each lookup/delete/decrement
			 * must find a record (i == 1) or the tree is corrupt.
			 */
			error = xfs_bmbt_lookup_eq(cur, &RIGHT, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			if ((error = xfs_btree_delete(cur, &i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			if ((error = xfs_btree_decrement(cur, 0, &i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			if ((error = xfs_btree_delete(cur, &i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			if ((error = xfs_btree_decrement(cur, 0, &i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			error = xfs_bmbt_update(cur, &LEFT);
			if (error)
				goto done;
		}
		break;

	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
		/*
		 * Setting all of a previous oldext extent to newext.
		 * The left neighbor is contiguous, the right is not.
		 * PREV is absorbed into LEFT; extent count -1.
		 */
		LEFT.br_blockcount += PREV.br_blockcount;

		xfs_iext_remove(ip, icur, state);
		xfs_iext_prev(ifp, icur);
		xfs_iext_update_extent(ip, state, icur, &LEFT);
		XFS_IFORK_NEXT_SET(ip, whichfork,
				XFS_IFORK_NEXTENTS(ip, whichfork) - 1);
		if (cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			/* btree: delete PREV's record, extend LEFT's */
			error = xfs_bmbt_lookup_eq(cur, &PREV, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			if ((error = xfs_btree_delete(cur, &i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			if ((error = xfs_btree_decrement(cur, 0, &i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			error = xfs_bmbt_update(cur, &LEFT);
			if (error)
				goto done;
		}
		break;

	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
		/*
		 * Setting all of a previous oldext extent to newext.
		 * The right neighbor is contiguous, the left is not.
		 * RIGHT is absorbed into PREV (which takes new's state);
		 * extent count -1.
		 */
		PREV.br_blockcount += RIGHT.br_blockcount;
		PREV.br_state = new->br_state;

		xfs_iext_next(ifp, icur);
		xfs_iext_remove(ip, icur, state);
		xfs_iext_prev(ifp, icur);
		xfs_iext_update_extent(ip, state, icur, &PREV);

		XFS_IFORK_NEXT_SET(ip, whichfork,
				XFS_IFORK_NEXTENTS(ip, whichfork) - 1);
		if (cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			/* btree: delete RIGHT's record, rewrite PREV's */
			error = xfs_bmbt_lookup_eq(cur, &RIGHT, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			if ((error = xfs_btree_delete(cur, &i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			if ((error = xfs_btree_decrement(cur, 0, &i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			error = xfs_bmbt_update(cur, &PREV);
			if (error)
				goto done;
		}
		break;

	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
		/*
		 * Setting all of a previous oldext extent to newext.
		 * Neither the left nor right neighbors are contiguous with
		 * the new one.  Only the state flips; extent count unchanged,
		 * so no XFS_ILOG_CORE is needed.
		 */
		PREV.br_state = new->br_state;
		xfs_iext_update_extent(ip, state, icur, &PREV);

		if (cur == NULL)
			rval = XFS_ILOG_DEXT;
		else {
			rval = 0;
			error = xfs_bmbt_lookup_eq(cur, new, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			error = xfs_bmbt_update(cur, &PREV);
			if (error)
				goto done;
		}
		break;

	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG:
		/*
		 * Setting the first part of a previous oldext extent to newext.
		 * The left neighbor is contiguous: LEFT grows by the converted
		 * piece, PREV shrinks from its front.  Extent count unchanged.
		 */
		LEFT.br_blockcount += new->br_blockcount;

		/* keep the original PREV as the btree lookup key */
		old = PREV;
		PREV.br_startoff += new->br_blockcount;
		PREV.br_startblock += new->br_blockcount;
		PREV.br_blockcount -= new->br_blockcount;

		xfs_iext_update_extent(ip, state, icur, &PREV);
		xfs_iext_prev(ifp, icur);
		xfs_iext_update_extent(ip, state, icur, &LEFT);

		if (cur == NULL)
			rval = XFS_ILOG_DEXT;
		else {
			rval = 0;
			/* btree: rewrite both the PREV and LEFT records */
			error = xfs_bmbt_lookup_eq(cur, &old, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			error = xfs_bmbt_update(cur, &PREV);
			if (error)
				goto done;
			error = xfs_btree_decrement(cur, 0, &i);
			if (error)
				goto done;
			error = xfs_bmbt_update(cur, &LEFT);
			if (error)
				goto done;
		}
		break;

	case BMAP_LEFT_FILLING:
		/*
		 * Setting the first part of a previous oldext extent to newext.
		 * The left neighbor is not contiguous: PREV shrinks from its
		 * front and new is inserted before it.  Extent count +1.
		 */
		old = PREV;
		PREV.br_startoff += new->br_blockcount;
		PREV.br_startblock += new->br_blockcount;
		PREV.br_blockcount -= new->br_blockcount;

		xfs_iext_update_extent(ip, state, icur, &PREV);
		xfs_iext_insert(ip, icur, new, state);
		XFS_IFORK_NEXT_SET(ip, whichfork,
				XFS_IFORK_NEXTENTS(ip, whichfork) + 1);
		if (cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			/* btree: shrink PREV's record, insert one for new */
			error = xfs_bmbt_lookup_eq(cur, &old, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			error = xfs_bmbt_update(cur, &PREV);
			if (error)
				goto done;
			cur->bc_rec.b = *new;
			if ((error = xfs_btree_insert(cur, &i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
		}
		break;

	case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
		/*
		 * Setting the last part of a previous oldext extent to newext.
		 * The right neighbor is contiguous with the new allocation:
		 * PREV shrinks from its tail, RIGHT grows backwards to cover
		 * the converted piece.  Extent count unchanged.
		 */
		old = PREV;
		PREV.br_blockcount -= new->br_blockcount;

		RIGHT.br_startoff = new->br_startoff;
		RIGHT.br_startblock = new->br_startblock;
		RIGHT.br_blockcount += new->br_blockcount;

		xfs_iext_update_extent(ip, state, icur, &PREV);
		xfs_iext_next(ifp, icur);
		xfs_iext_update_extent(ip, state, icur, &RIGHT);

		if (cur == NULL)
			rval = XFS_ILOG_DEXT;
		else {
			rval = 0;
			/* btree: rewrite both the PREV and RIGHT records */
			error = xfs_bmbt_lookup_eq(cur, &old, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			error = xfs_bmbt_update(cur, &PREV);
			if (error)
				goto done;
			error = xfs_btree_increment(cur, 0, &i);
			if (error)
				goto done;
			error = xfs_bmbt_update(cur, &RIGHT);
			if (error)
				goto done;
		}
		break;

	case BMAP_RIGHT_FILLING:
		/*
		 * Setting the last part of a previous oldext extent to newext.
		 * The right neighbor is not contiguous: PREV shrinks from its
		 * tail and new is inserted after it.  Extent count +1.
		 */
		old = PREV;
		PREV.br_blockcount -= new->br_blockcount;

		xfs_iext_update_extent(ip, state, icur, &PREV);
		xfs_iext_next(ifp, icur);
		xfs_iext_insert(ip, icur, new, state);

		XFS_IFORK_NEXT_SET(ip, whichfork,
				XFS_IFORK_NEXTENTS(ip, whichfork) + 1);
		if (cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(cur, &old, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			error = xfs_bmbt_update(cur, &PREV);
			if (error)
				goto done;
			/*
			 * Position the cursor for the insert; the record for
			 * new must not already exist (i == 0), else the tree
			 * is corrupt.
			 */
			error = xfs_bmbt_lookup_eq(cur, new, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
			if ((error = xfs_btree_insert(cur, &i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
		}
		break;

	case 0:
		/*
		 * Setting the middle part of a previous oldext extent to
		 * newext.  Contiguity is impossible here.
		 * One extent becomes three extents: PREV keeps the head,
		 * r[0] (== *new) is the converted middle, r[1] is the tail
		 * left in the old state.  Extent count +2.
		 */
		old = PREV;
		PREV.br_blockcount = new->br_startoff - PREV.br_startoff;

		r[0] = *new;
		r[1].br_startoff = new_endoff;
		r[1].br_blockcount =
			old.br_startoff + old.br_blockcount - new_endoff;
		r[1].br_startblock = new->br_startblock + new->br_blockcount;
		r[1].br_state = PREV.br_state;

		xfs_iext_update_extent(ip, state, icur, &PREV);
		xfs_iext_next(ifp, icur);
		/* insert tail first, then middle, so list order is kept */
		xfs_iext_insert(ip, icur, &r[1], state);
		xfs_iext_insert(ip, icur, &r[0], state);

		XFS_IFORK_NEXT_SET(ip, whichfork,
				XFS_IFORK_NEXTENTS(ip, whichfork) + 2);
		if (cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(cur, &old, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			/* new right extent - oldext */
			error = xfs_bmbt_update(cur, &r[1]);
			if (error)
				goto done;
			/* new left extent - oldext */
			cur->bc_rec.b = PREV;
			if ((error = xfs_btree_insert(cur, &i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			/*
			 * Reset the cursor to the position of the new extent
			 * we are about to insert as we can't trust it after
			 * the previous insert.
			 */
			error = xfs_bmbt_lookup_eq(cur, new, &i);
			if (error)
				goto done;
			/* record must not exist yet before the insert */
			XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
			/* new middle extent - newext */
			if ((error = xfs_btree_insert(cur, &i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
		}
		break;

	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
	case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
	case BMAP_LEFT_FILLING | BMAP_RIGHT_CONTIG:
	case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
	case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
	case BMAP_LEFT_CONTIG:
	case BMAP_RIGHT_CONTIG:
		/*
		 * These cases are all impossible: a neighbor cannot be
		 * contiguous on a side whose boundary new does not fill,
		 * since the remainder of PREV still sits between them.
		 */
		ASSERT(0);
	}

	/* update reverse mappings */
	error = xfs_rmap_convert_extent(mp, dfops, ip, whichfork, new);
	if (error)
		goto done;

	/* convert to a btree if necessary */
	if (xfs_bmap_needs_btree(ip, whichfork)) {
		int	tmp_logflags;	/* partial log flag return val */

		ASSERT(cur == NULL);
		error = xfs_bmap_extents_to_btree(tp, ip, first, dfops, &cur,
				0, &tmp_logflags, whichfork);
		*logflagsp |= tmp_logflags;
		if (error)
			goto done;
	}

	/* clear out the allocated field, done with it now in any case. */
	if (cur) {
		cur->bc_private.b.allocated = 0;
		*curp = cur;
	}

	xfs_bmap_check_leaf_extents(*curp, ip, whichfork);
done:
	*logflagsp |= rval;
	return error;
#undef	LEFT
#undef	RIGHT
#undef	PREV
}
2525
2526/*
Christoph Hellwig1fd044d2011-09-18 20:40:49 +00002527 * Convert a hole to a delayed allocation.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002528 */
Christoph Hellwig1fd044d2011-09-18 20:40:49 +00002529STATIC void
Linus Torvalds1da177e2005-04-16 15:20:36 -07002530xfs_bmap_add_extent_hole_delay(
2531 xfs_inode_t *ip, /* incore inode pointer */
Darrick J. Wongbe51f812016-10-03 09:11:32 -07002532 int whichfork,
Christoph Hellwigb2b17122017-11-03 10:34:43 -07002533 struct xfs_iext_cursor *icur,
Christoph Hellwig1fd044d2011-09-18 20:40:49 +00002534 xfs_bmbt_irec_t *new) /* new data to add to file extents */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002535{
Mandy Kirkconnell4eea22f2006-03-14 13:29:52 +11002536 xfs_ifork_t *ifp; /* inode fork pointer */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002537 xfs_bmbt_irec_t left; /* left neighbor extent entry */
2538 xfs_filblks_t newlen=0; /* new indirect size */
2539 xfs_filblks_t oldlen=0; /* old indirect size */
2540 xfs_bmbt_irec_t right; /* right neighbor extent entry */
Christoph Hellwig060ea652017-10-19 11:02:29 -07002541 int state = xfs_bmap_fork_to_state(whichfork);
Christoph Hellwig3ffc18e2017-10-17 14:16:23 -07002542 xfs_filblks_t temp; /* temp for indirect calculations */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002543
Darrick J. Wongbe51f812016-10-03 09:11:32 -07002544 ifp = XFS_IFORK_PTR(ip, whichfork);
Eric Sandeen9d87c312009-01-14 23:22:07 -06002545 ASSERT(isnullstartblock(new->br_startblock));
Christoph Hellwig7574aa92009-11-25 00:00:19 +00002546
Linus Torvalds1da177e2005-04-16 15:20:36 -07002547 /*
2548 * Check and set flags if this segment has a left neighbor
2549 */
Christoph Hellwigb2b17122017-11-03 10:34:43 -07002550 if (xfs_iext_peek_prev_extent(ifp, icur, &left)) {
Christoph Hellwig7574aa92009-11-25 00:00:19 +00002551 state |= BMAP_LEFT_VALID;
Christoph Hellwig7574aa92009-11-25 00:00:19 +00002552 if (isnullstartblock(left.br_startblock))
2553 state |= BMAP_LEFT_DELAY;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002554 }
Christoph Hellwig7574aa92009-11-25 00:00:19 +00002555
Linus Torvalds1da177e2005-04-16 15:20:36 -07002556 /*
2557 * Check and set flags if the current (right) segment exists.
2558 * If it doesn't exist, we're converting the hole at end-of-file.
2559 */
Christoph Hellwigb2b17122017-11-03 10:34:43 -07002560 if (xfs_iext_get_extent(ifp, icur, &right)) {
Christoph Hellwig7574aa92009-11-25 00:00:19 +00002561 state |= BMAP_RIGHT_VALID;
Christoph Hellwig7574aa92009-11-25 00:00:19 +00002562 if (isnullstartblock(right.br_startblock))
2563 state |= BMAP_RIGHT_DELAY;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002564 }
Christoph Hellwig7574aa92009-11-25 00:00:19 +00002565
Linus Torvalds1da177e2005-04-16 15:20:36 -07002566 /*
2567 * Set contiguity flags on the left and right neighbors.
2568 * Don't let extents get too large, even if the pieces are contiguous.
2569 */
Christoph Hellwig7574aa92009-11-25 00:00:19 +00002570 if ((state & BMAP_LEFT_VALID) && (state & BMAP_LEFT_DELAY) &&
2571 left.br_startoff + left.br_blockcount == new->br_startoff &&
2572 left.br_blockcount + new->br_blockcount <= MAXEXTLEN)
2573 state |= BMAP_LEFT_CONTIG;
2574
2575 if ((state & BMAP_RIGHT_VALID) && (state & BMAP_RIGHT_DELAY) &&
2576 new->br_startoff + new->br_blockcount == right.br_startoff &&
2577 new->br_blockcount + right.br_blockcount <= MAXEXTLEN &&
2578 (!(state & BMAP_LEFT_CONTIG) ||
2579 (left.br_blockcount + new->br_blockcount +
2580 right.br_blockcount <= MAXEXTLEN)))
2581 state |= BMAP_RIGHT_CONTIG;
2582
Linus Torvalds1da177e2005-04-16 15:20:36 -07002583 /*
2584 * Switch out based on the contiguity flags.
2585 */
Christoph Hellwig7574aa92009-11-25 00:00:19 +00002586 switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) {
2587 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002588 /*
2589 * New allocation is contiguous with delayed allocations
2590 * on the left and on the right.
Mandy Kirkconnell4eea22f2006-03-14 13:29:52 +11002591 * Merge all three into a single extent record.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002592 */
2593 temp = left.br_blockcount + new->br_blockcount +
2594 right.br_blockcount;
Christoph Hellwig0b1b2132009-12-14 23:14:59 +00002595
Eric Sandeen9d87c312009-01-14 23:22:07 -06002596 oldlen = startblockval(left.br_startblock) +
2597 startblockval(new->br_startblock) +
2598 startblockval(right.br_startblock);
Brian Foster0e339ef2017-02-13 22:48:18 -08002599 newlen = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
2600 oldlen);
Christoph Hellwig3ffc18e2017-10-17 14:16:23 -07002601 left.br_startblock = nullstartblock(newlen);
2602 left.br_blockcount = temp;
Christoph Hellwig0b1b2132009-12-14 23:14:59 +00002603
Christoph Hellwigc38ccf52017-11-03 10:34:47 -07002604 xfs_iext_remove(ip, icur, state);
Christoph Hellwigb2b17122017-11-03 10:34:43 -07002605 xfs_iext_prev(ifp, icur);
2606 xfs_iext_update_extent(ip, state, icur, &left);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002607 break;
2608
Christoph Hellwig7574aa92009-11-25 00:00:19 +00002609 case BMAP_LEFT_CONTIG:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002610 /*
2611 * New allocation is contiguous with a delayed allocation
2612 * on the left.
2613 * Merge the new allocation with the left neighbor.
2614 */
2615 temp = left.br_blockcount + new->br_blockcount;
Christoph Hellwigec90c552011-05-23 08:52:53 +00002616
Eric Sandeen9d87c312009-01-14 23:22:07 -06002617 oldlen = startblockval(left.br_startblock) +
2618 startblockval(new->br_startblock);
Brian Foster0e339ef2017-02-13 22:48:18 -08002619 newlen = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
2620 oldlen);
Christoph Hellwig3ffc18e2017-10-17 14:16:23 -07002621 left.br_blockcount = temp;
2622 left.br_startblock = nullstartblock(newlen);
Christoph Hellwig41d196f2017-11-03 10:34:39 -07002623
Christoph Hellwigb2b17122017-11-03 10:34:43 -07002624 xfs_iext_prev(ifp, icur);
2625 xfs_iext_update_extent(ip, state, icur, &left);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002626 break;
2627
Christoph Hellwig7574aa92009-11-25 00:00:19 +00002628 case BMAP_RIGHT_CONTIG:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002629 /*
2630 * New allocation is contiguous with a delayed allocation
2631 * on the right.
2632 * Merge the new allocation with the right neighbor.
2633 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002634 temp = new->br_blockcount + right.br_blockcount;
Eric Sandeen9d87c312009-01-14 23:22:07 -06002635 oldlen = startblockval(new->br_startblock) +
2636 startblockval(right.br_startblock);
Brian Foster0e339ef2017-02-13 22:48:18 -08002637 newlen = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
2638 oldlen);
Christoph Hellwig3ffc18e2017-10-17 14:16:23 -07002639 right.br_startoff = new->br_startoff;
2640 right.br_startblock = nullstartblock(newlen);
2641 right.br_blockcount = temp;
Christoph Hellwigb2b17122017-11-03 10:34:43 -07002642 xfs_iext_update_extent(ip, state, icur, &right);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002643 break;
2644
2645 case 0:
2646 /*
2647 * New allocation is not contiguous with another
2648 * delayed allocation.
2649 * Insert a new entry.
2650 */
2651 oldlen = newlen = 0;
Christoph Hellwig0254c2f2017-11-03 10:34:46 -07002652 xfs_iext_insert(ip, icur, new, state);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002653 break;
2654 }
2655 if (oldlen != newlen) {
2656 ASSERT(oldlen > newlen);
Dave Chinner0d485ad2015-02-23 21:22:03 +11002657 xfs_mod_fdblocks(ip->i_mount, (int64_t)(oldlen - newlen),
2658 false);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002659 /*
2660 * Nothing to do for disk quota accounting here.
2661 */
2662 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002663}
2664
2665/*
Christoph Hellwiga5bd606b2011-09-18 20:40:54 +00002666 * Convert a hole to a real allocation.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002667 */
2668STATIC int /* error */
2669xfs_bmap_add_extent_hole_real(
Christoph Hellwig6d045582017-04-11 16:45:54 -07002670 struct xfs_trans *tp,
2671 struct xfs_inode *ip,
2672 int whichfork,
Christoph Hellwigb2b17122017-11-03 10:34:43 -07002673 struct xfs_iext_cursor *icur,
Christoph Hellwig6d045582017-04-11 16:45:54 -07002674 struct xfs_btree_cur **curp,
2675 struct xfs_bmbt_irec *new,
2676 xfs_fsblock_t *first,
2677 struct xfs_defer_ops *dfops,
Darrick J. Wong95eb3082018-05-09 10:02:32 -07002678 int *logflagsp,
2679 int flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002680{
Christoph Hellwig6d045582017-04-11 16:45:54 -07002681 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
2682 struct xfs_mount *mp = ip->i_mount;
2683 struct xfs_btree_cur *cur = *curp;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002684 int error; /* error return value */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002685 int i; /* temp state */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002686 xfs_bmbt_irec_t left; /* left neighbor extent entry */
2687 xfs_bmbt_irec_t right; /* right neighbor extent entry */
Olaf Weber3e57ecf2006-06-09 14:48:12 +10002688 int rval=0; /* return value (logging flags) */
Christoph Hellwig060ea652017-10-19 11:02:29 -07002689 int state = xfs_bmap_fork_to_state(whichfork);
Christoph Hellwig1abb9e52017-10-17 14:16:24 -07002690 struct xfs_bmbt_irec old;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002691
Christoph Hellwiga5bd606b2011-09-18 20:40:54 +00002692 ASSERT(!isnullstartblock(new->br_startblock));
Christoph Hellwig6d045582017-04-11 16:45:54 -07002693 ASSERT(!cur || !(cur->bc_private.b.flags & XFS_BTCUR_BPRV_WASDEL));
Christoph Hellwiga5bd606b2011-09-18 20:40:54 +00002694
Bill O'Donnellff6d6af2015-10-12 18:21:22 +11002695 XFS_STATS_INC(mp, xs_add_exlist);
Christoph Hellwiga5bd606b2011-09-18 20:40:54 +00002696
Linus Torvalds1da177e2005-04-16 15:20:36 -07002697 /*
2698 * Check and set flags if this segment has a left neighbor.
2699 */
Christoph Hellwigb2b17122017-11-03 10:34:43 -07002700 if (xfs_iext_peek_prev_extent(ifp, icur, &left)) {
Christoph Hellwig7574aa92009-11-25 00:00:19 +00002701 state |= BMAP_LEFT_VALID;
Christoph Hellwig7574aa92009-11-25 00:00:19 +00002702 if (isnullstartblock(left.br_startblock))
2703 state |= BMAP_LEFT_DELAY;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002704 }
Christoph Hellwig7574aa92009-11-25 00:00:19 +00002705
Linus Torvalds1da177e2005-04-16 15:20:36 -07002706 /*
2707 * Check and set flags if this segment has a current value.
2708 * Not true if we're inserting into the "hole" at eof.
2709 */
Christoph Hellwigb2b17122017-11-03 10:34:43 -07002710 if (xfs_iext_get_extent(ifp, icur, &right)) {
Christoph Hellwig7574aa92009-11-25 00:00:19 +00002711 state |= BMAP_RIGHT_VALID;
Christoph Hellwig7574aa92009-11-25 00:00:19 +00002712 if (isnullstartblock(right.br_startblock))
2713 state |= BMAP_RIGHT_DELAY;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002714 }
Christoph Hellwig7574aa92009-11-25 00:00:19 +00002715
Linus Torvalds1da177e2005-04-16 15:20:36 -07002716 /*
2717 * We're inserting a real allocation between "left" and "right".
2718 * Set the contiguity flags. Don't let extents get too large.
2719 */
Christoph Hellwig7574aa92009-11-25 00:00:19 +00002720 if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
2721 left.br_startoff + left.br_blockcount == new->br_startoff &&
2722 left.br_startblock + left.br_blockcount == new->br_startblock &&
2723 left.br_state == new->br_state &&
2724 left.br_blockcount + new->br_blockcount <= MAXEXTLEN)
2725 state |= BMAP_LEFT_CONTIG;
2726
2727 if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
2728 new->br_startoff + new->br_blockcount == right.br_startoff &&
2729 new->br_startblock + new->br_blockcount == right.br_startblock &&
2730 new->br_state == right.br_state &&
2731 new->br_blockcount + right.br_blockcount <= MAXEXTLEN &&
2732 (!(state & BMAP_LEFT_CONTIG) ||
2733 left.br_blockcount + new->br_blockcount +
2734 right.br_blockcount <= MAXEXTLEN))
2735 state |= BMAP_RIGHT_CONTIG;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002736
Olaf Weber3e57ecf2006-06-09 14:48:12 +10002737 error = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002738 /*
2739 * Select which case we're in here, and implement it.
2740 */
Christoph Hellwig7574aa92009-11-25 00:00:19 +00002741 switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) {
2742 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002743 /*
2744 * New allocation is contiguous with real allocations on the
2745 * left and on the right.
Mandy Kirkconnell4eea22f2006-03-14 13:29:52 +11002746 * Merge all three into a single extent record.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002747 */
Christoph Hellwig1abb9e52017-10-17 14:16:24 -07002748 left.br_blockcount += new->br_blockcount + right.br_blockcount;
Christoph Hellwig0b1b2132009-12-14 23:14:59 +00002749
Christoph Hellwigc38ccf52017-11-03 10:34:47 -07002750 xfs_iext_remove(ip, icur, state);
Christoph Hellwigb2b17122017-11-03 10:34:43 -07002751 xfs_iext_prev(ifp, icur);
2752 xfs_iext_update_extent(ip, state, icur, &left);
Christoph Hellwigec90c552011-05-23 08:52:53 +00002753
Christoph Hellwig6d045582017-04-11 16:45:54 -07002754 XFS_IFORK_NEXT_SET(ip, whichfork,
2755 XFS_IFORK_NEXTENTS(ip, whichfork) - 1);
2756 if (cur == NULL) {
Eric Sandeen9d87c312009-01-14 23:22:07 -06002757 rval = XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
Olaf Weber3e57ecf2006-06-09 14:48:12 +10002758 } else {
2759 rval = XFS_ILOG_CORE;
Christoph Hellwige16cf9b2017-10-17 14:16:26 -07002760 error = xfs_bmbt_lookup_eq(cur, &right, &i);
Christoph Hellwigc6534242011-09-18 20:41:05 +00002761 if (error)
Olaf Weber3e57ecf2006-06-09 14:48:12 +10002762 goto done;
Eric Sandeenc29aad42015-02-23 22:39:08 +11002763 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
Christoph Hellwig6d045582017-04-11 16:45:54 -07002764 error = xfs_btree_delete(cur, &i);
Christoph Hellwigc6534242011-09-18 20:41:05 +00002765 if (error)
Olaf Weber3e57ecf2006-06-09 14:48:12 +10002766 goto done;
Eric Sandeenc29aad42015-02-23 22:39:08 +11002767 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
Christoph Hellwig6d045582017-04-11 16:45:54 -07002768 error = xfs_btree_decrement(cur, 0, &i);
Christoph Hellwigc6534242011-09-18 20:41:05 +00002769 if (error)
Olaf Weber3e57ecf2006-06-09 14:48:12 +10002770 goto done;
Eric Sandeenc29aad42015-02-23 22:39:08 +11002771 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
Christoph Hellwiga67d00a2017-10-17 14:16:26 -07002772 error = xfs_bmbt_update(cur, &left);
Christoph Hellwigc6534242011-09-18 20:41:05 +00002773 if (error)
Olaf Weber3e57ecf2006-06-09 14:48:12 +10002774 goto done;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002775 }
Olaf Weber3e57ecf2006-06-09 14:48:12 +10002776 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002777
Christoph Hellwig7574aa92009-11-25 00:00:19 +00002778 case BMAP_LEFT_CONTIG:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002779 /*
2780 * New allocation is contiguous with a real allocation
2781 * on the left.
2782 * Merge the new allocation with the left neighbor.
2783 */
Christoph Hellwig1abb9e52017-10-17 14:16:24 -07002784 old = left;
Christoph Hellwig1abb9e52017-10-17 14:16:24 -07002785 left.br_blockcount += new->br_blockcount;
Christoph Hellwig1d2e0082017-11-03 10:34:40 -07002786
Christoph Hellwigb2b17122017-11-03 10:34:43 -07002787 xfs_iext_prev(ifp, icur);
2788 xfs_iext_update_extent(ip, state, icur, &left);
Christoph Hellwig0b1b2132009-12-14 23:14:59 +00002789
Christoph Hellwig6d045582017-04-11 16:45:54 -07002790 if (cur == NULL) {
Eric Sandeen9d87c312009-01-14 23:22:07 -06002791 rval = xfs_ilog_fext(whichfork);
Olaf Weber3e57ecf2006-06-09 14:48:12 +10002792 } else {
2793 rval = 0;
Christoph Hellwige16cf9b2017-10-17 14:16:26 -07002794 error = xfs_bmbt_lookup_eq(cur, &old, &i);
Christoph Hellwigc6534242011-09-18 20:41:05 +00002795 if (error)
Olaf Weber3e57ecf2006-06-09 14:48:12 +10002796 goto done;
Eric Sandeenc29aad42015-02-23 22:39:08 +11002797 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
Christoph Hellwiga67d00a2017-10-17 14:16:26 -07002798 error = xfs_bmbt_update(cur, &left);
Christoph Hellwigc6534242011-09-18 20:41:05 +00002799 if (error)
Olaf Weber3e57ecf2006-06-09 14:48:12 +10002800 goto done;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002801 }
Olaf Weber3e57ecf2006-06-09 14:48:12 +10002802 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002803
Christoph Hellwig7574aa92009-11-25 00:00:19 +00002804 case BMAP_RIGHT_CONTIG:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002805 /*
2806 * New allocation is contiguous with a real allocation
2807 * on the right.
2808 * Merge the new allocation with the right neighbor.
2809 */
Christoph Hellwig1abb9e52017-10-17 14:16:24 -07002810 old = right;
Christoph Hellwigca5d8e5b2017-10-19 11:04:44 -07002811
Christoph Hellwig1abb9e52017-10-17 14:16:24 -07002812 right.br_startoff = new->br_startoff;
2813 right.br_startblock = new->br_startblock;
2814 right.br_blockcount += new->br_blockcount;
Christoph Hellwigb2b17122017-11-03 10:34:43 -07002815 xfs_iext_update_extent(ip, state, icur, &right);
Christoph Hellwig0b1b2132009-12-14 23:14:59 +00002816
Christoph Hellwig6d045582017-04-11 16:45:54 -07002817 if (cur == NULL) {
Eric Sandeen9d87c312009-01-14 23:22:07 -06002818 rval = xfs_ilog_fext(whichfork);
Olaf Weber3e57ecf2006-06-09 14:48:12 +10002819 } else {
2820 rval = 0;
Christoph Hellwige16cf9b2017-10-17 14:16:26 -07002821 error = xfs_bmbt_lookup_eq(cur, &old, &i);
Christoph Hellwigc6534242011-09-18 20:41:05 +00002822 if (error)
Olaf Weber3e57ecf2006-06-09 14:48:12 +10002823 goto done;
Eric Sandeenc29aad42015-02-23 22:39:08 +11002824 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
Christoph Hellwiga67d00a2017-10-17 14:16:26 -07002825 error = xfs_bmbt_update(cur, &right);
Christoph Hellwigc6534242011-09-18 20:41:05 +00002826 if (error)
Olaf Weber3e57ecf2006-06-09 14:48:12 +10002827 goto done;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002828 }
Olaf Weber3e57ecf2006-06-09 14:48:12 +10002829 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002830
2831 case 0:
2832 /*
2833 * New allocation is not contiguous with another
2834 * real allocation.
2835 * Insert a new entry.
2836 */
Christoph Hellwig0254c2f2017-11-03 10:34:46 -07002837 xfs_iext_insert(ip, icur, new, state);
Christoph Hellwig6d045582017-04-11 16:45:54 -07002838 XFS_IFORK_NEXT_SET(ip, whichfork,
2839 XFS_IFORK_NEXTENTS(ip, whichfork) + 1);
2840 if (cur == NULL) {
Eric Sandeen9d87c312009-01-14 23:22:07 -06002841 rval = XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
Olaf Weber3e57ecf2006-06-09 14:48:12 +10002842 } else {
2843 rval = XFS_ILOG_CORE;
Christoph Hellwige16cf9b2017-10-17 14:16:26 -07002844 error = xfs_bmbt_lookup_eq(cur, new, &i);
Christoph Hellwigc6534242011-09-18 20:41:05 +00002845 if (error)
Olaf Weber3e57ecf2006-06-09 14:48:12 +10002846 goto done;
Eric Sandeenc29aad42015-02-23 22:39:08 +11002847 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
Christoph Hellwig6d045582017-04-11 16:45:54 -07002848 error = xfs_btree_insert(cur, &i);
Christoph Hellwigc6534242011-09-18 20:41:05 +00002849 if (error)
Olaf Weber3e57ecf2006-06-09 14:48:12 +10002850 goto done;
Eric Sandeenc29aad42015-02-23 22:39:08 +11002851 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002852 }
Olaf Weber3e57ecf2006-06-09 14:48:12 +10002853 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002854 }
Christoph Hellwiga5bd606b2011-09-18 20:40:54 +00002855
Darrick J. Wong95eb3082018-05-09 10:02:32 -07002856 /* add reverse mapping unless caller opted out */
2857 if (!(flags & XFS_BMAPI_NORMAP)) {
2858 error = xfs_rmap_map_extent(mp, dfops, ip, whichfork, new);
2859 if (error)
2860 goto done;
2861 }
Darrick J. Wong9c194642016-08-03 12:16:05 +10002862
Christoph Hellwiga5bd606b2011-09-18 20:40:54 +00002863 /* convert to a btree if necessary */
Christoph Hellwig6d045582017-04-11 16:45:54 -07002864 if (xfs_bmap_needs_btree(ip, whichfork)) {
Christoph Hellwiga5bd606b2011-09-18 20:40:54 +00002865 int tmp_logflags; /* partial log flag return val */
2866
Christoph Hellwig6d045582017-04-11 16:45:54 -07002867 ASSERT(cur == NULL);
2868 error = xfs_bmap_extents_to_btree(tp, ip, first, dfops, curp,
Christoph Hellwigc6534242011-09-18 20:41:05 +00002869 0, &tmp_logflags, whichfork);
Christoph Hellwig6d045582017-04-11 16:45:54 -07002870 *logflagsp |= tmp_logflags;
2871 cur = *curp;
Christoph Hellwiga5bd606b2011-09-18 20:40:54 +00002872 if (error)
2873 goto done;
2874 }
2875
2876 /* clear out the allocated field, done with it now in any case. */
Christoph Hellwig6d045582017-04-11 16:45:54 -07002877 if (cur)
2878 cur->bc_private.b.allocated = 0;
Christoph Hellwigc6534242011-09-18 20:41:05 +00002879
Christoph Hellwig6d045582017-04-11 16:45:54 -07002880 xfs_bmap_check_leaf_extents(cur, ip, whichfork);
Olaf Weber3e57ecf2006-06-09 14:48:12 +10002881done:
Christoph Hellwig6d045582017-04-11 16:45:54 -07002882 *logflagsp |= rval;
Olaf Weber3e57ecf2006-06-09 14:48:12 +10002883 return error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002884}
2885
Nathan Scottdd9f4382006-01-11 15:28:28 +11002886/*
Dave Chinner9e5987a72013-02-25 12:31:26 +11002887 * Functions used in the extent read, allocate and remove paths
2888 */
2889
2890/*
Nathan Scottdd9f4382006-01-11 15:28:28 +11002891 * Adjust the size of the new extent based on di_extsize and rt extsize.
2892 */
Dave Chinner68988112013-08-12 20:49:42 +10002893int
Nathan Scottdd9f4382006-01-11 15:28:28 +11002894xfs_bmap_extsize_align(
2895 xfs_mount_t *mp,
2896 xfs_bmbt_irec_t *gotp, /* next extent pointer */
2897 xfs_bmbt_irec_t *prevp, /* previous extent pointer */
2898 xfs_extlen_t extsz, /* align to this extent size */
2899 int rt, /* is this a realtime inode? */
2900 int eof, /* is extent at end-of-file? */
2901 int delay, /* creating delalloc extent? */
2902 int convert, /* overwriting unwritten extent? */
2903 xfs_fileoff_t *offp, /* in/out: aligned offset */
2904 xfs_extlen_t *lenp) /* in/out: aligned length */
2905{
2906 xfs_fileoff_t orig_off; /* original offset */
2907 xfs_extlen_t orig_alen; /* original length */
2908 xfs_fileoff_t orig_end; /* original off+len */
2909 xfs_fileoff_t nexto; /* next file offset */
2910 xfs_fileoff_t prevo; /* previous file offset */
2911 xfs_fileoff_t align_off; /* temp for offset */
2912 xfs_extlen_t align_alen; /* temp for length */
2913 xfs_extlen_t temp; /* temp for calculations */
2914
2915 if (convert)
2916 return 0;
2917
2918 orig_off = align_off = *offp;
2919 orig_alen = align_alen = *lenp;
2920 orig_end = orig_off + orig_alen;
2921
2922 /*
2923 * If this request overlaps an existing extent, then don't
2924 * attempt to perform any additional alignment.
2925 */
2926 if (!delay && !eof &&
2927 (orig_off >= gotp->br_startoff) &&
2928 (orig_end <= gotp->br_startoff + gotp->br_blockcount)) {
2929 return 0;
2930 }
2931
2932 /*
2933 * If the file offset is unaligned vs. the extent size
2934 * we need to align it. This will be possible unless
2935 * the file was previously written with a kernel that didn't
2936 * perform this alignment, or if a truncate shot us in the
2937 * foot.
2938 */
2939 temp = do_mod(orig_off, extsz);
2940 if (temp) {
2941 align_alen += temp;
2942 align_off -= temp;
2943 }
Dave Chinner6dea405e2015-05-29 07:40:06 +10002944
2945 /* Same adjustment for the end of the requested area. */
2946 temp = (align_alen % extsz);
2947 if (temp)
Nathan Scottdd9f4382006-01-11 15:28:28 +11002948 align_alen += extsz - temp;
Dave Chinner6dea405e2015-05-29 07:40:06 +10002949
2950 /*
2951 * For large extent hint sizes, the aligned extent might be larger than
2952 * MAXEXTLEN. In that case, reduce the size by an extsz so that it pulls
2953 * the length back under MAXEXTLEN. The outer allocation loops handle
2954 * short allocation just fine, so it is safe to do this. We only want to
2955 * do it when we are forced to, though, because it means more allocation
2956 * operations are required.
2957 */
2958 while (align_alen > MAXEXTLEN)
2959 align_alen -= extsz;
2960 ASSERT(align_alen <= MAXEXTLEN);
2961
Nathan Scottdd9f4382006-01-11 15:28:28 +11002962 /*
2963 * If the previous block overlaps with this proposed allocation
2964 * then move the start forward without adjusting the length.
2965 */
2966 if (prevp->br_startoff != NULLFILEOFF) {
2967 if (prevp->br_startblock == HOLESTARTBLOCK)
2968 prevo = prevp->br_startoff;
2969 else
2970 prevo = prevp->br_startoff + prevp->br_blockcount;
2971 } else
2972 prevo = 0;
2973 if (align_off != orig_off && align_off < prevo)
2974 align_off = prevo;
2975 /*
2976 * If the next block overlaps with this proposed allocation
2977 * then move the start back without adjusting the length,
2978 * but not before offset 0.
2979 * This may of course make the start overlap previous block,
2980 * and if we hit the offset 0 limit then the next block
2981 * can still overlap too.
2982 */
2983 if (!eof && gotp->br_startoff != NULLFILEOFF) {
2984 if ((delay && gotp->br_startblock == HOLESTARTBLOCK) ||
2985 (!delay && gotp->br_startblock == DELAYSTARTBLOCK))
2986 nexto = gotp->br_startoff + gotp->br_blockcount;
2987 else
2988 nexto = gotp->br_startoff;
2989 } else
2990 nexto = NULLFILEOFF;
2991 if (!eof &&
2992 align_off + align_alen != orig_end &&
2993 align_off + align_alen > nexto)
2994 align_off = nexto > align_alen ? nexto - align_alen : 0;
2995 /*
2996 * If we're now overlapping the next or previous extent that
2997 * means we can't fit an extsz piece in this hole. Just move
2998 * the start forward to the first valid spot and set
2999 * the length so we hit the end.
3000 */
3001 if (align_off != orig_off && align_off < prevo)
3002 align_off = prevo;
3003 if (align_off + align_alen != orig_end &&
3004 align_off + align_alen > nexto &&
3005 nexto != NULLFILEOFF) {
3006 ASSERT(nexto > prevo);
3007 align_alen = nexto - align_off;
3008 }
3009
3010 /*
3011 * If realtime, and the result isn't a multiple of the realtime
3012 * extent size we need to remove blocks until it is.
3013 */
3014 if (rt && (temp = (align_alen % mp->m_sb.sb_rextsize))) {
3015 /*
3016 * We're not covering the original request, or
3017 * we won't be able to once we fix the length.
3018 */
3019 if (orig_off < align_off ||
3020 orig_end > align_off + align_alen ||
3021 align_alen - temp < orig_alen)
Dave Chinner24513372014-06-25 14:58:08 +10003022 return -EINVAL;
Nathan Scottdd9f4382006-01-11 15:28:28 +11003023 /*
3024 * Try to fix it by moving the start up.
3025 */
3026 if (align_off + temp <= orig_off) {
3027 align_alen -= temp;
3028 align_off += temp;
3029 }
3030 /*
3031 * Try to fix it by moving the end in.
3032 */
3033 else if (align_off + align_alen - temp >= orig_end)
3034 align_alen -= temp;
3035 /*
3036 * Set the start to the minimum then trim the length.
3037 */
3038 else {
3039 align_alen -= orig_off - align_off;
3040 align_off = orig_off;
3041 align_alen -= align_alen % mp->m_sb.sb_rextsize;
3042 }
3043 /*
3044 * Result doesn't cover the request, fail it.
3045 */
3046 if (orig_off < align_off || orig_end > align_off + align_alen)
Dave Chinner24513372014-06-25 14:58:08 +10003047 return -EINVAL;
Nathan Scottdd9f4382006-01-11 15:28:28 +11003048 } else {
3049 ASSERT(orig_off >= align_off);
Dave Chinner6dea405e2015-05-29 07:40:06 +10003050 /* see MAXEXTLEN handling above */
3051 ASSERT(orig_end <= align_off + align_alen ||
3052 align_alen + extsz > MAXEXTLEN);
Nathan Scottdd9f4382006-01-11 15:28:28 +11003053 }
3054
3055#ifdef DEBUG
3056 if (!eof && gotp->br_startoff != NULLFILEOFF)
3057 ASSERT(align_off + align_alen <= gotp->br_startoff);
3058 if (prevp->br_startoff != NULLFILEOFF)
3059 ASSERT(align_off >= prevp->br_startoff + prevp->br_blockcount);
3060#endif
3061
3062 *lenp = align_alen;
3063 *offp = align_off;
3064 return 0;
3065}
3066
Linus Torvalds1da177e2005-04-16 15:20:36 -07003067#define XFS_ALLOC_GAP_UNITS 4
3068
Dave Chinner68988112013-08-12 20:49:42 +10003069void
Nathan Scotta365bdd2006-03-14 13:34:16 +11003070xfs_bmap_adjacent(
Dave Chinner68988112013-08-12 20:49:42 +10003071 struct xfs_bmalloca *ap) /* bmap alloc argument struct */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003072{
3073 xfs_fsblock_t adjust; /* adjustment to block numbers */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003074 xfs_agnumber_t fb_agno; /* ag number of ap->firstblock */
3075 xfs_mount_t *mp; /* mount point structure */
3076 int nullfb; /* true if ap->firstblock isn't set */
3077 int rt; /* true if inode is realtime */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003078
3079#define ISVALID(x,y) \
3080 (rt ? \
3081 (x) < mp->m_sb.sb_rblocks : \
3082 XFS_FSB_TO_AGNO(mp, x) == XFS_FSB_TO_AGNO(mp, y) && \
3083 XFS_FSB_TO_AGNO(mp, x) < mp->m_sb.sb_agcount && \
3084 XFS_FSB_TO_AGBNO(mp, x) < mp->m_sb.sb_agblocks)
3085
Linus Torvalds1da177e2005-04-16 15:20:36 -07003086 mp = ap->ip->i_mount;
Dave Chinner0937e0f2011-09-18 20:40:57 +00003087 nullfb = *ap->firstblock == NULLFSBLOCK;
Dave Chinner292378e2016-09-26 08:21:28 +10003088 rt = XFS_IS_REALTIME_INODE(ap->ip) &&
3089 xfs_alloc_is_userdata(ap->datatype);
Dave Chinner0937e0f2011-09-18 20:40:57 +00003090 fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp, *ap->firstblock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003091 /*
3092 * If allocating at eof, and there's a previous real block,
Malcolm Parsons9da096f2009-03-29 09:55:42 +02003093 * try to use its last block as our starting point.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003094 */
Dave Chinnerbaf41a52011-09-18 20:40:56 +00003095 if (ap->eof && ap->prev.br_startoff != NULLFILEOFF &&
3096 !isnullstartblock(ap->prev.br_startblock) &&
3097 ISVALID(ap->prev.br_startblock + ap->prev.br_blockcount,
3098 ap->prev.br_startblock)) {
Dave Chinner3a756672011-09-18 20:40:58 +00003099 ap->blkno = ap->prev.br_startblock + ap->prev.br_blockcount;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003100 /*
3101 * Adjust for the gap between prevp and us.
3102 */
Dave Chinner3a756672011-09-18 20:40:58 +00003103 adjust = ap->offset -
Dave Chinnerbaf41a52011-09-18 20:40:56 +00003104 (ap->prev.br_startoff + ap->prev.br_blockcount);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003105 if (adjust &&
Dave Chinner3a756672011-09-18 20:40:58 +00003106 ISVALID(ap->blkno + adjust, ap->prev.br_startblock))
3107 ap->blkno += adjust;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003108 }
3109 /*
3110 * If not at eof, then compare the two neighbor blocks.
3111 * Figure out whether either one gives us a good starting point,
3112 * and pick the better one.
3113 */
3114 else if (!ap->eof) {
3115 xfs_fsblock_t gotbno; /* right side block number */
3116 xfs_fsblock_t gotdiff=0; /* right side difference */
3117 xfs_fsblock_t prevbno; /* left side block number */
3118 xfs_fsblock_t prevdiff=0; /* left side difference */
3119
3120 /*
3121 * If there's a previous (left) block, select a requested
3122 * start block based on it.
3123 */
Dave Chinnerbaf41a52011-09-18 20:40:56 +00003124 if (ap->prev.br_startoff != NULLFILEOFF &&
3125 !isnullstartblock(ap->prev.br_startblock) &&
3126 (prevbno = ap->prev.br_startblock +
3127 ap->prev.br_blockcount) &&
3128 ISVALID(prevbno, ap->prev.br_startblock)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003129 /*
3130 * Calculate gap to end of previous block.
3131 */
Dave Chinner3a756672011-09-18 20:40:58 +00003132 adjust = prevdiff = ap->offset -
Dave Chinnerbaf41a52011-09-18 20:40:56 +00003133 (ap->prev.br_startoff +
3134 ap->prev.br_blockcount);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003135 /*
3136 * Figure the startblock based on the previous block's
3137 * end and the gap size.
3138 * Heuristic!
3139 * If the gap is large relative to the piece we're
3140 * allocating, or using it gives us an invalid block
3141 * number, then just use the end of the previous block.
3142 */
Dave Chinner3a756672011-09-18 20:40:58 +00003143 if (prevdiff <= XFS_ALLOC_GAP_UNITS * ap->length &&
Linus Torvalds1da177e2005-04-16 15:20:36 -07003144 ISVALID(prevbno + prevdiff,
Dave Chinnerbaf41a52011-09-18 20:40:56 +00003145 ap->prev.br_startblock))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003146 prevbno += adjust;
3147 else
3148 prevdiff += adjust;
3149 /*
3150 * If the firstblock forbids it, can't use it,
3151 * must use default.
3152 */
3153 if (!rt && !nullfb &&
3154 XFS_FSB_TO_AGNO(mp, prevbno) != fb_agno)
3155 prevbno = NULLFSBLOCK;
3156 }
3157 /*
3158 * No previous block or can't follow it, just default.
3159 */
3160 else
3161 prevbno = NULLFSBLOCK;
3162 /*
3163 * If there's a following (right) block, select a requested
3164 * start block based on it.
3165 */
Dave Chinnerbaf41a52011-09-18 20:40:56 +00003166 if (!isnullstartblock(ap->got.br_startblock)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003167 /*
3168 * Calculate gap to start of next block.
3169 */
Dave Chinner3a756672011-09-18 20:40:58 +00003170 adjust = gotdiff = ap->got.br_startoff - ap->offset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003171 /*
3172 * Figure the startblock based on the next block's
3173 * start and the gap size.
3174 */
Dave Chinnerbaf41a52011-09-18 20:40:56 +00003175 gotbno = ap->got.br_startblock;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003176 /*
3177 * Heuristic!
3178 * If the gap is large relative to the piece we're
3179 * allocating, or using it gives us an invalid block
3180 * number, then just use the start of the next block
3181 * offset by our length.
3182 */
Dave Chinner3a756672011-09-18 20:40:58 +00003183 if (gotdiff <= XFS_ALLOC_GAP_UNITS * ap->length &&
Linus Torvalds1da177e2005-04-16 15:20:36 -07003184 ISVALID(gotbno - gotdiff, gotbno))
3185 gotbno -= adjust;
Dave Chinner3a756672011-09-18 20:40:58 +00003186 else if (ISVALID(gotbno - ap->length, gotbno)) {
3187 gotbno -= ap->length;
3188 gotdiff += adjust - ap->length;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003189 } else
3190 gotdiff += adjust;
3191 /*
3192 * If the firstblock forbids it, can't use it,
3193 * must use default.
3194 */
3195 if (!rt && !nullfb &&
3196 XFS_FSB_TO_AGNO(mp, gotbno) != fb_agno)
3197 gotbno = NULLFSBLOCK;
3198 }
3199 /*
3200 * No next block, just default.
3201 */
3202 else
3203 gotbno = NULLFSBLOCK;
3204 /*
3205 * If both valid, pick the better one, else the only good
Dave Chinner3a756672011-09-18 20:40:58 +00003206 * one, else ap->blkno is already set (to 0 or the inode block).
Linus Torvalds1da177e2005-04-16 15:20:36 -07003207 */
3208 if (prevbno != NULLFSBLOCK && gotbno != NULLFSBLOCK)
Dave Chinner3a756672011-09-18 20:40:58 +00003209 ap->blkno = prevdiff <= gotdiff ? prevbno : gotbno;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003210 else if (prevbno != NULLFSBLOCK)
Dave Chinner3a756672011-09-18 20:40:58 +00003211 ap->blkno = prevbno;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003212 else if (gotbno != NULLFSBLOCK)
Dave Chinner3a756672011-09-18 20:40:58 +00003213 ap->blkno = gotbno;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003214 }
Nathan Scotta365bdd2006-03-14 13:34:16 +11003215#undef ISVALID
Nathan Scotta365bdd2006-03-14 13:34:16 +11003216}
3217
Christoph Hellwigc977eb12014-04-23 07:11:41 +10003218static int
3219xfs_bmap_longest_free_extent(
3220 struct xfs_trans *tp,
3221 xfs_agnumber_t ag,
3222 xfs_extlen_t *blen,
3223 int *notinit)
3224{
3225 struct xfs_mount *mp = tp->t_mountp;
3226 struct xfs_perag *pag;
3227 xfs_extlen_t longest;
3228 int error = 0;
3229
3230 pag = xfs_perag_get(mp, ag);
3231 if (!pag->pagf_init) {
3232 error = xfs_alloc_pagf_init(mp, tp, ag, XFS_ALLOC_FLAG_TRYLOCK);
3233 if (error)
3234 goto out;
3235
3236 if (!pag->pagf_init) {
3237 *notinit = 1;
3238 goto out;
3239 }
3240 }
3241
Eric Sandeena1f69412018-04-06 10:09:42 -07003242 longest = xfs_alloc_longest_free_extent(pag,
Darrick J. Wong3fd129b2016-09-19 10:30:52 +10003243 xfs_alloc_min_freelist(mp, pag),
3244 xfs_ag_resv_needed(pag, XFS_AG_RESV_NONE));
Christoph Hellwigc977eb12014-04-23 07:11:41 +10003245 if (*blen < longest)
3246 *blen = longest;
3247
3248out:
3249 xfs_perag_put(pag);
3250 return error;
3251}
3252
3253static void
3254xfs_bmap_select_minlen(
3255 struct xfs_bmalloca *ap,
3256 struct xfs_alloc_arg *args,
3257 xfs_extlen_t *blen,
3258 int notinit)
3259{
3260 if (notinit || *blen < ap->minlen) {
3261 /*
3262 * Since we did a BUF_TRYLOCK above, it is possible that
3263 * there is space for this request.
3264 */
3265 args->minlen = ap->minlen;
3266 } else if (*blen < args->maxlen) {
3267 /*
3268 * If the best seen length is less than the request length,
3269 * use the best as the minimum.
3270 */
3271 args->minlen = *blen;
3272 } else {
3273 /*
3274 * Otherwise we've seen an extent as big as maxlen, use that
3275 * as the minimum.
3276 */
3277 args->minlen = args->maxlen;
3278 }
3279}
3280
Nathan Scotta365bdd2006-03-14 13:34:16 +11003281STATIC int
Christoph Hellwigc467c042010-02-15 23:34:42 +00003282xfs_bmap_btalloc_nullfb(
3283 struct xfs_bmalloca *ap,
3284 struct xfs_alloc_arg *args,
3285 xfs_extlen_t *blen)
3286{
3287 struct xfs_mount *mp = ap->ip->i_mount;
Christoph Hellwigc467c042010-02-15 23:34:42 +00003288 xfs_agnumber_t ag, startag;
3289 int notinit = 0;
3290 int error;
3291
Christoph Hellwigc977eb12014-04-23 07:11:41 +10003292 args->type = XFS_ALLOCTYPE_START_BNO;
Christoph Hellwigc467c042010-02-15 23:34:42 +00003293 args->total = ap->total;
3294
Christoph Hellwigc467c042010-02-15 23:34:42 +00003295 startag = ag = XFS_FSB_TO_AGNO(mp, args->fsbno);
3296 if (startag == NULLAGNUMBER)
3297 startag = ag = 0;
3298
Dave Chinner14b064c2011-01-27 12:16:28 +11003299 while (*blen < args->maxlen) {
Christoph Hellwigc977eb12014-04-23 07:11:41 +10003300 error = xfs_bmap_longest_free_extent(args->tp, ag, blen,
3301 &notinit);
3302 if (error)
3303 return error;
Christoph Hellwigc467c042010-02-15 23:34:42 +00003304
Christoph Hellwigc467c042010-02-15 23:34:42 +00003305 if (++ag == mp->m_sb.sb_agcount)
3306 ag = 0;
3307 if (ag == startag)
3308 break;
Christoph Hellwigc467c042010-02-15 23:34:42 +00003309 }
Christoph Hellwigc977eb12014-04-23 07:11:41 +10003310
3311 xfs_bmap_select_minlen(ap, args, blen, notinit);
3312 return 0;
3313}
3314
/*
 * Choose the allocation group and minimum allocation length for a
 * filestreams allocation.  Probe the AG implied by args->fsbno first; if
 * its longest free extent can't satisfy maxlen, ask the filestreams code
 * for a new AG and probe that one as well.  Finally point ap->blkno at
 * block 0 of the chosen AG so any fallback allocation searches there.
 */
STATIC int
xfs_bmap_btalloc_filestreams(
	struct xfs_bmalloca	*ap,
	struct xfs_alloc_arg	*args,
	xfs_extlen_t		*blen)
{
	struct xfs_mount	*mp = ap->ip->i_mount;
	xfs_agnumber_t		ag;
	int			notinit = 0;
	int			error;

	args->type = XFS_ALLOCTYPE_NEAR_BNO;
	args->total = ap->total;

	ag = XFS_FSB_TO_AGNO(mp, args->fsbno);
	if (ag == NULLAGNUMBER)
		ag = 0;

	/* How long is the longest free extent in the current stream AG? */
	error = xfs_bmap_longest_free_extent(args->tp, ag, blen, &notinit);
	if (error)
		return error;

	if (*blen < args->maxlen) {
		/* Not big enough; let the filestream pick a different AG. */
		error = xfs_filestream_new_ag(ap, &ag);
		if (error)
			return error;

		error = xfs_bmap_longest_free_extent(args->tp, ag, blen,
						     &notinit);
		if (error)
			return error;

	}

	xfs_bmap_select_minlen(ap, args, blen, notinit);

	/*
	 * Set the failure fallback case to look in the selected AG as stream
	 * may have moved.
	 */
	ap->blkno = args->fsbno = XFS_AGB_TO_FSB(mp, ag, 0);
	return 0;
}
3358
Darrick J. Wong751f3762018-01-25 13:58:13 -08003359/* Update all inode and quota accounting for the allocation we just did. */
3360static void
3361xfs_bmap_btalloc_accounting(
3362 struct xfs_bmalloca *ap,
3363 struct xfs_alloc_arg *args)
3364{
Darrick J. Wong4b4c1322018-01-19 09:05:48 -08003365 if (ap->flags & XFS_BMAPI_COWFORK) {
3366 /*
3367 * COW fork blocks are in-core only and thus are treated as
3368 * in-core quota reservation (like delalloc blocks) even when
3369 * converted to real blocks. The quota reservation is not
3370 * accounted to disk until blocks are remapped to the data
3371 * fork. So if these blocks were previously delalloc, we
3372 * already have quota reservation and there's nothing to do
3373 * yet.
3374 */
3375 if (ap->wasdel)
3376 return;
3377
3378 /*
3379 * Otherwise, we've allocated blocks in a hole. The transaction
3380 * has acquired in-core quota reservation for this extent.
3381 * Rather than account these as real blocks, however, we reduce
3382 * the transaction quota reservation based on the allocation.
3383 * This essentially transfers the transaction quota reservation
3384 * to that of a delalloc extent.
3385 */
3386 ap->ip->i_delayed_blks += args->len;
3387 xfs_trans_mod_dquot_byino(ap->tp, ap->ip, XFS_TRANS_DQ_RES_BLKS,
3388 -(long)args->len);
3389 return;
3390 }
3391
3392 /* data/attr fork only */
3393 ap->ip->i_d.di_nblocks += args->len;
Darrick J. Wong751f3762018-01-25 13:58:13 -08003394 xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE);
3395 if (ap->wasdel)
3396 ap->ip->i_delayed_blks -= args->len;
3397 xfs_trans_mod_dquot_byino(ap->tp, ap->ip,
3398 ap->wasdel ? XFS_TRANS_DQ_DELBCOUNT : XFS_TRANS_DQ_BCOUNT,
3399 args->len);
3400}
3401
/*
 * Allocate blocks for a file mapping from the regular (non-realtime)
 * allocator via xfs_alloc_vextent.  Works out alignment and length
 * constraints from the extent size hint and stripe geometry, picks a
 * target block from ap->blkno/ap->firstblock, then retries the allocation
 * with progressively relaxed constraints (exact bno -> aligned ->
 * unaligned -> smaller minlen -> first free AG) until one succeeds or the
 * filesystem is genuinely out of space.  On success ap->blkno/ap->length
 * describe the allocation and accounting has been applied; on total
 * failure ap->blkno is NULLFSBLOCK and ap->length is 0 (returns 0 either
 * way — a hard error return means the allocator itself failed).
 */
STATIC int
xfs_bmap_btalloc(
	struct xfs_bmalloca	*ap)	/* bmap alloc argument struct */
{
	xfs_mount_t	*mp;		/* mount point structure */
	xfs_alloctype_t	atype = 0;	/* type for allocation routines */
	xfs_extlen_t	align = 0;	/* minimum allocation alignment */
	xfs_agnumber_t	fb_agno;	/* ag number of ap->firstblock */
	xfs_agnumber_t	ag;
	xfs_alloc_arg_t	args;
	xfs_fileoff_t	orig_offset;	/* caller's offset before extsz align */
	xfs_extlen_t	orig_length;	/* caller's length before extsz align */
	xfs_extlen_t	blen;
	xfs_extlen_t	nextminlen = 0;
	int		nullfb;		/* true if ap->firstblock isn't set */
	int		isaligned;
	int		tryagain;
	int		error;
	int		stripe_align;

	ASSERT(ap->length);
	orig_offset = ap->offset;
	orig_length = ap->length;

	mp = ap->ip->i_mount;

	/* stripe alignment for allocation is determined by mount parameters */
	stripe_align = 0;
	if (mp->m_swidth && (mp->m_flags & XFS_MOUNT_SWALLOC))
		stripe_align = mp->m_swidth;
	else if (mp->m_dalign)
		stripe_align = mp->m_dalign;

	/* COW fork uses the CoW extent size hint, user data the normal one. */
	if (ap->flags & XFS_BMAPI_COWFORK)
		align = xfs_get_cowextsz_hint(ap->ip);
	else if (xfs_alloc_is_userdata(ap->datatype))
		align = xfs_get_extsz_hint(ap->ip);
	if (align) {
		/*
		 * Round ap->offset/ap->length out to the hint; may be undone
		 * below if the allocation comes back too short.
		 */
		error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev,
						align, 0, ap->eof, 0, ap->conv,
						&ap->offset, &ap->length);
		ASSERT(!error);
		ASSERT(ap->length);
	}


	nullfb = *ap->firstblock == NULLFSBLOCK;
	fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp, *ap->firstblock);
	if (nullfb) {
		/* No prior allocation in this transaction: pick a hint. */
		if (xfs_alloc_is_userdata(ap->datatype) &&
		    xfs_inode_is_filestream(ap->ip)) {
			ag = xfs_filestream_lookup_ag(ap->ip);
			ag = (ag != NULLAGNUMBER) ? ag : 0;
			ap->blkno = XFS_AGB_TO_FSB(mp, ag, 0);
		} else {
			ap->blkno = XFS_INO_TO_FSB(mp, ap->ip->i_ino);
		}
	} else
		ap->blkno = *ap->firstblock;

	xfs_bmap_adjacent(ap);

	/*
	 * If allowed, use ap->blkno; otherwise must use firstblock since
	 * it's in the right allocation group.
	 */
	if (nullfb || XFS_FSB_TO_AGNO(mp, ap->blkno) == fb_agno)
		;
	else
		ap->blkno = *ap->firstblock;
	/*
	 * Normal allocation, done through xfs_alloc_vextent.
	 */
	tryagain = isaligned = 0;
	memset(&args, 0, sizeof(args));
	args.tp = ap->tp;
	args.mp = mp;
	args.fsbno = ap->blkno;
	xfs_rmap_skip_owner_update(&args.oinfo);

	/* Trim the allocation back to the maximum an AG can fit. */
	args.maxlen = MIN(ap->length, mp->m_ag_max_usable);
	args.firstblock = *ap->firstblock;
	blen = 0;
	if (nullfb) {
		/*
		 * Search for an allocation group with a single extent large
		 * enough for the request.  If one isn't found, then adjust
		 * the minimum allocation size to the largest space found.
		 */
		if (xfs_alloc_is_userdata(ap->datatype) &&
		    xfs_inode_is_filestream(ap->ip))
			error = xfs_bmap_btalloc_filestreams(ap, &args, &blen);
		else
			error = xfs_bmap_btalloc_nullfb(ap, &args, &blen);
		if (error)
			return error;
	} else if (ap->dfops->dop_low) {
		/* Low-space mode: don't search below the firstblock AG. */
		if (xfs_inode_is_filestream(ap->ip))
			args.type = XFS_ALLOCTYPE_FIRST_AG;
		else
			args.type = XFS_ALLOCTYPE_START_BNO;
		args.total = args.minlen = ap->minlen;
	} else {
		args.type = XFS_ALLOCTYPE_NEAR_BNO;
		args.total = ap->total;
		args.minlen = ap->minlen;
	}
	/* apply extent size hints if obtained earlier */
	if (align) {
		args.prod = align;
		if ((args.mod = (xfs_extlen_t)do_mod(ap->offset, args.prod)))
			args.mod = (xfs_extlen_t)(args.prod - args.mod);
	} else if (mp->m_sb.sb_blocksize >= PAGE_SIZE) {
		args.prod = 1;
		args.mod = 0;
	} else {
		/* Blocks smaller than a page: align allocations to pages. */
		args.prod = PAGE_SIZE >> mp->m_sb.sb_blocklog;
		if ((args.mod = (xfs_extlen_t)(do_mod(ap->offset, args.prod))))
			args.mod = (xfs_extlen_t)(args.prod - args.mod);
	}
	/*
	 * If we are not low on available data blocks, and the
	 * underlying logical volume manager is a stripe, and
	 * the file offset is zero then try to allocate data
	 * blocks on stripe unit boundary.
	 * NOTE: ap->aeof is only set if the allocation length
	 * is >= the stripe unit and the allocation offset is
	 * at the end of file.
	 */
	if (!ap->dfops->dop_low && ap->aeof) {
		if (!ap->offset) {
			args.alignment = stripe_align;
			atype = args.type;
			isaligned = 1;
			/*
			 * Adjust for alignment
			 */
			if (blen > args.alignment && blen <= args.maxlen)
				args.minlen = blen - args.alignment;
			args.minalignslop = 0;
		} else {
			/*
			 * First try an exact bno allocation.
			 * If it fails then do a near or start bno
			 * allocation with alignment turned on.
			 */
			atype = args.type;
			tryagain = 1;
			args.type = XFS_ALLOCTYPE_THIS_BNO;
			args.alignment = 1;
			/*
			 * Compute the minlen+alignment for the
			 * next case.  Set slop so that the value
			 * of minlen+alignment+slop doesn't go up
			 * between the calls.
			 */
			if (blen > stripe_align && blen <= args.maxlen)
				nextminlen = blen - stripe_align;
			else
				nextminlen = args.minlen;
			if (nextminlen + stripe_align > args.minlen + 1)
				args.minalignslop =
					nextminlen + stripe_align -
					args.minlen - 1;
			else
				args.minalignslop = 0;
		}
	} else {
		args.alignment = 1;
		args.minalignslop = 0;
	}
	args.minleft = ap->minleft;
	args.wasdel = ap->wasdel;
	args.resv = XFS_AG_RESV_NONE;
	args.datatype = ap->datatype;
	if (ap->datatype & XFS_ALLOC_USERDATA_ZERO)
		args.ip = ap->ip;

	/* First attempt with the constraints computed above. */
	error = xfs_alloc_vextent(&args);
	if (error)
		return error;

	if (tryagain && args.fsbno == NULLFSBLOCK) {
		/*
		 * Exact allocation failed. Now try with alignment
		 * turned on.
		 */
		args.type = atype;
		args.fsbno = ap->blkno;
		args.alignment = stripe_align;
		args.minlen = nextminlen;
		args.minalignslop = 0;
		isaligned = 1;
		if ((error = xfs_alloc_vextent(&args)))
			return error;
	}
	if (isaligned && args.fsbno == NULLFSBLOCK) {
		/*
		 * allocation failed, so turn off alignment and
		 * try again.
		 */
		args.type = atype;
		args.fsbno = ap->blkno;
		args.alignment = 0;
		if ((error = xfs_alloc_vextent(&args)))
			return error;
	}
	/* Retry with the caller's minimum length if we asked for more. */
	if (args.fsbno == NULLFSBLOCK && nullfb &&
	    args.minlen > ap->minlen) {
		args.minlen = ap->minlen;
		args.type = XFS_ALLOCTYPE_START_BNO;
		args.fsbno = ap->blkno;
		if ((error = xfs_alloc_vextent(&args)))
			return error;
	}
	/* Last resort: any AG, and flip the deferred ops into low mode. */
	if (args.fsbno == NULLFSBLOCK && nullfb) {
		args.fsbno = 0;
		args.type = XFS_ALLOCTYPE_FIRST_AG;
		args.total = ap->minlen;
		if ((error = xfs_alloc_vextent(&args)))
			return error;
		ap->dfops->dop_low = true;
	}
	if (args.fsbno != NULLFSBLOCK) {
		/*
		 * check the allocation happened at the same or higher AG than
		 * the first block that was allocated.
		 */
		ASSERT(*ap->firstblock == NULLFSBLOCK ||
		       XFS_FSB_TO_AGNO(mp, *ap->firstblock) <=
		       XFS_FSB_TO_AGNO(mp, args.fsbno));

		ap->blkno = args.fsbno;
		if (*ap->firstblock == NULLFSBLOCK)
			*ap->firstblock = args.fsbno;
		ASSERT(nullfb || fb_agno <= args.agno);
		ap->length = args.len;
		/*
		 * If the extent size hint is active, we tried to round the
		 * caller's allocation request offset down to extsz and the
		 * length up to another extsz boundary.  If we found a free
		 * extent we mapped it in starting at this new offset.  If the
		 * newly mapped space isn't long enough to cover any of the
		 * range of offsets that was originally requested, move the
		 * mapping up so that we can fill as much of the caller's
		 * original request as possible.  Free space is apparently
		 * very fragmented so we're unlikely to be able to satisfy the
		 * hints anyway.
		 */
		if (ap->length <= orig_length)
			ap->offset = orig_offset;
		else if (ap->offset + ap->length < orig_offset + orig_length)
			ap->offset = orig_offset + orig_length - ap->length;
		xfs_bmap_btalloc_accounting(ap, &args);
	} else {
		ap->blkno = NULLFSBLOCK;
		ap->length = 0;
	}
	return 0;
}
3663
3664/*
3665 * xfs_bmap_alloc is called by xfs_bmapi to allocate an extent for a file.
3666 * It figures out where to ask the underlying allocator to put the new extent.
3667 */
3668STATIC int
3669xfs_bmap_alloc(
Dave Chinner68988112013-08-12 20:49:42 +10003670 struct xfs_bmalloca *ap) /* bmap alloc argument struct */
Nathan Scotta365bdd2006-03-14 13:34:16 +11003671{
Dave Chinner292378e2016-09-26 08:21:28 +10003672 if (XFS_IS_REALTIME_INODE(ap->ip) &&
3673 xfs_alloc_is_userdata(ap->datatype))
Nathan Scotta365bdd2006-03-14 13:34:16 +11003674 return xfs_bmap_rtalloc(ap);
3675 return xfs_bmap_btalloc(ap);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003676}
3677
Darrick J. Wong0a0af282016-10-20 15:51:50 +11003678/* Trim extent to fit a logical block range. */
3679void
3680xfs_trim_extent(
3681 struct xfs_bmbt_irec *irec,
3682 xfs_fileoff_t bno,
3683 xfs_filblks_t len)
3684{
3685 xfs_fileoff_t distance;
3686 xfs_fileoff_t end = bno + len;
3687
3688 if (irec->br_startoff + irec->br_blockcount <= bno ||
3689 irec->br_startoff >= end) {
3690 irec->br_blockcount = 0;
3691 return;
3692 }
3693
3694 if (irec->br_startoff < bno) {
3695 distance = bno - irec->br_startoff;
3696 if (isnullstartblock(irec->br_startblock))
3697 irec->br_startblock = DELAYSTARTBLOCK;
3698 if (irec->br_startblock != DELAYSTARTBLOCK &&
3699 irec->br_startblock != HOLESTARTBLOCK)
3700 irec->br_startblock += distance;
3701 irec->br_startoff += distance;
3702 irec->br_blockcount -= distance;
3703 }
3704
3705 if (end < irec->br_startoff + irec->br_blockcount) {
3706 distance = irec->br_startoff + irec->br_blockcount - end;
3707 irec->br_blockcount -= distance;
3708 }
3709}
3710
Brian Foster40214d12017-10-13 09:47:46 -07003711/* trim extent to within eof */
3712void
3713xfs_trim_extent_eof(
3714 struct xfs_bmbt_irec *irec,
3715 struct xfs_inode *ip)
3716
3717{
3718 xfs_trim_extent(irec, 0, XFS_B_TO_FSB(ip->i_mount,
3719 i_size_read(VFS_I(ip))));
3720}
3721
Linus Torvalds1da177e2005-04-16 15:20:36 -07003722/*
Dave Chinneraef9a892011-09-18 20:40:44 +00003723 * Trim the returned map to the required bounds
3724 */
3725STATIC void
3726xfs_bmapi_trim_map(
3727 struct xfs_bmbt_irec *mval,
3728 struct xfs_bmbt_irec *got,
3729 xfs_fileoff_t *bno,
3730 xfs_filblks_t len,
3731 xfs_fileoff_t obno,
3732 xfs_fileoff_t end,
3733 int n,
3734 int flags)
3735{
3736 if ((flags & XFS_BMAPI_ENTIRE) ||
3737 got->br_startoff + got->br_blockcount <= obno) {
3738 *mval = *got;
3739 if (isnullstartblock(got->br_startblock))
3740 mval->br_startblock = DELAYSTARTBLOCK;
3741 return;
3742 }
3743
3744 if (obno > *bno)
3745 *bno = obno;
3746 ASSERT((*bno >= obno) || (n == 0));
3747 ASSERT(*bno < end);
3748 mval->br_startoff = *bno;
3749 if (isnullstartblock(got->br_startblock))
3750 mval->br_startblock = DELAYSTARTBLOCK;
3751 else
3752 mval->br_startblock = got->br_startblock +
3753 (*bno - got->br_startoff);
3754 /*
3755 * Return the minimum of what we got and what we asked for for
3756 * the length. We can use the len variable here because it is
3757 * modified below and we could have been there before coming
3758 * here if the first part of the allocation didn't overlap what
3759 * was asked for.
3760 */
3761 mval->br_blockcount = XFS_FILBLKS_MIN(end - *bno,
3762 got->br_blockcount - (*bno - got->br_startoff));
3763 mval->br_state = got->br_state;
3764 ASSERT(mval->br_blockcount <= len);
3765 return;
3766}
3767
/*
 * Update and validate the extent map to return
 *
 * Advances *bno and *len past the mapping in **map, then decides whether
 * that mapping should be merged into the previous output entry or kept as
 * a new one (bumping *n and *map).
 */
STATIC void
xfs_bmapi_update_map(
	struct xfs_bmbt_irec	**map,
	xfs_fileoff_t		*bno,
	xfs_filblks_t		*len,
	xfs_fileoff_t		obno,
	xfs_fileoff_t		end,
	int			*n,
	int			flags)
{
	xfs_bmbt_irec_t	*mval = *map;

	ASSERT((flags & XFS_BMAPI_ENTIRE) ||
	       ((mval->br_startoff + mval->br_blockcount) <= end));
	ASSERT((flags & XFS_BMAPI_ENTIRE) || (mval->br_blockcount <= *len) ||
	       (mval->br_startoff < obno));

	*bno = mval->br_startoff + mval->br_blockcount;
	*len = end - *bno;
	if (*n > 0 && mval->br_startoff == mval[-1].br_startoff) {
		/* update previous map with new information */
		ASSERT(mval->br_startblock == mval[-1].br_startblock);
		ASSERT(mval->br_blockcount > mval[-1].br_blockcount);
		ASSERT(mval->br_state == mval[-1].br_state);
		mval[-1].br_blockcount = mval->br_blockcount;
		mval[-1].br_state = mval->br_state;
	} else if (*n > 0 && mval->br_startblock != DELAYSTARTBLOCK &&
		   mval[-1].br_startblock != DELAYSTARTBLOCK &&
		   mval[-1].br_startblock != HOLESTARTBLOCK &&
		   mval->br_startblock == mval[-1].br_startblock +
					  mval[-1].br_blockcount &&
		   ((flags & XFS_BMAPI_IGSTATE) ||
			mval[-1].br_state == mval->br_state)) {
		/* merge with the previous real, physically contiguous map */
		ASSERT(mval->br_startoff ==
		       mval[-1].br_startoff + mval[-1].br_blockcount);
		mval[-1].br_blockcount += mval->br_blockcount;
	} else if (*n > 0 &&
		   mval->br_startblock == DELAYSTARTBLOCK &&
		   mval[-1].br_startblock == DELAYSTARTBLOCK &&
		   mval->br_startoff ==
		   mval[-1].br_startoff + mval[-1].br_blockcount) {
		/* merge adjacent delayed-allocation mappings */
		mval[-1].br_blockcount += mval->br_blockcount;
		mval[-1].br_state = mval->br_state;
	} else if (!((*n == 0) &&
		     ((mval->br_startoff + mval->br_blockcount) <=
		      obno))) {
		/*
		 * Keep this mapping as a new entry, unless it's the first
		 * one and lies entirely before the original request offset.
		 */
		mval++;
		(*n)++;
	}
	*map = mval;
}
3822
/*
 * Map file blocks to filesystem blocks without allocation.
 *
 * Fills mval[] with up to *nmap mappings covering the range
 * [bno, bno + len), synthesizing HOLESTARTBLOCK entries for holes and for
 * the region beyond the last extent.  On return *nmap is the number of
 * mappings actually produced.  Allocates nothing, but may read the extent
 * list in from disk (xfs_iread_extents) if it isn't cached yet.
 *
 * Returns 0 on success, -EFSCORRUPTED for a bad fork format, -EIO if the
 * filesystem is shut down, or the error from reading in the extents.
 */
int
xfs_bmapi_read(
	struct xfs_inode	*ip,
	xfs_fileoff_t		bno,
	xfs_filblks_t		len,
	struct xfs_bmbt_irec	*mval,
	int			*nmap,
	int			flags)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_ifork	*ifp;
	struct xfs_bmbt_irec	got;
	xfs_fileoff_t		obno;
	xfs_fileoff_t		end;
	struct xfs_iext_cursor	icur;
	int			error;
	bool			eof = false;
	int			n = 0;
	int			whichfork = xfs_bmapi_whichfork(flags);

	ASSERT(*nmap >= 1);
	ASSERT(!(flags & ~(XFS_BMAPI_ATTRFORK|XFS_BMAPI_ENTIRE|
			   XFS_BMAPI_IGSTATE|XFS_BMAPI_COWFORK)));
	/* Readers only need the inode locked shared, but exclusive is OK. */
	ASSERT(xfs_isilocked(ip, XFS_ILOCK_SHARED|XFS_ILOCK_EXCL));

	if (unlikely(XFS_TEST_ERROR(
	    (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
	     XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE),
	     mp, XFS_ERRTAG_BMAPIFORMAT))) {
		XFS_ERROR_REPORT("xfs_bmapi_read", XFS_ERRLEVEL_LOW, mp);
		return -EFSCORRUPTED;
	}

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	XFS_STATS_INC(mp, xs_blk_mapr);

	ifp = XFS_IFORK_PTR(ip, whichfork);

	/* No CoW fork? Return a hole. */
	if (whichfork == XFS_COW_FORK && !ifp) {
		mval->br_startoff = bno;
		mval->br_startblock = HOLESTARTBLOCK;
		mval->br_blockcount = len;
		mval->br_state = XFS_EXT_NORM;
		*nmap = 1;
		return 0;
	}

	/* Pull the extent list in from disk if it isn't in core yet. */
	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
		error = xfs_iread_extents(NULL, ip, whichfork);
		if (error)
			return error;
	}

	/* Find the first extent at or after bno; eof if there is none. */
	if (!xfs_iext_lookup_extent(ip, ifp, bno, &icur, &got))
		eof = true;
	end = bno + len;
	obno = bno;

	while (bno < end && n < *nmap) {
		/* Reading past eof, act as though there's a hole up to end. */
		if (eof)
			got.br_startoff = end;
		if (got.br_startoff > bno) {
			/* Reading in a hole. */
			mval->br_startoff = bno;
			mval->br_startblock = HOLESTARTBLOCK;
			mval->br_blockcount =
				XFS_FILBLKS_MIN(len, got.br_startoff - bno);
			mval->br_state = XFS_EXT_NORM;
			bno += mval->br_blockcount;
			len -= mval->br_blockcount;
			mval++;
			n++;
			continue;
		}

		/* set up the extent map to return. */
		xfs_bmapi_trim_map(mval, &got, &bno, len, obno, end, n, flags);
		xfs_bmapi_update_map(&mval, &bno, &len, obno, end, &n, flags);

		/* If we're done, stop now. */
		if (bno >= end || n >= *nmap)
			break;

		/* Else go on to the next record. */
		if (!xfs_iext_next_extent(ifp, &icur, &got))
			eof = true;
	}
	*nmap = n;
	return 0;
}
3920
/*
 * Add a delayed allocation extent to an inode. Blocks are reserved from the
 * global pool and the extent inserted into the inode in-core extent tree.
 *
 * On entry, got refers to the first extent beyond the offset of the extent to
 * allocate or eof is specified if no such extent exists. On return, got refers
 * to the extent record that was inserted to the inode fork.
 *
 * Note that the allocated extent may have been merged with contiguous extents
 * during insertion into the inode fork. Thus, got does not reflect the current
 * state of the inode fork on return. If necessary, the caller can use lastx to
 * look up the updated record in the inode fork.
 *
 * Returns 0 on success; on failure every reservation taken here is unwound
 * before the error is returned.
 */
int
xfs_bmapi_reserve_delalloc(
	struct xfs_inode	*ip,
	int			whichfork,
	xfs_fileoff_t		off,
	xfs_filblks_t		len,
	xfs_filblks_t		prealloc,
	struct xfs_bmbt_irec	*got,
	struct xfs_iext_cursor	*icur,
	int			eof)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
	xfs_extlen_t		alen;	/* total blocks to reserve */
	xfs_extlen_t		indlen;	/* worst-case indirect (bmbt) blocks */
	int			error;
	xfs_fileoff_t		aoff = off;	/* aligned start offset */

	/*
	 * Cap the alloc length. Keep track of prealloc so we know whether to
	 * tag the inode before we return.
	 */
	alen = XFS_FILBLKS_MIN(len + prealloc, MAXEXTLEN);
	if (!eof)
		alen = XFS_FILBLKS_MIN(alen, got->br_startoff - aoff);
	if (prealloc && alen >= len)
		prealloc = alen - len;

	/* Figure out the extent size, adjust alen */
	if (whichfork == XFS_COW_FORK) {
		struct xfs_bmbt_irec	prev;
		xfs_extlen_t		extsz = xfs_get_cowextsz_hint(ip);

		if (!xfs_iext_peek_prev_extent(ifp, icur, &prev))
			prev.br_startoff = NULLFILEOFF;

		/* Align aoff/alen to the CoW extent size hint. */
		error = xfs_bmap_extsize_align(mp, got, &prev, extsz, 0, eof,
					       1, 0, &aoff, &alen);
		ASSERT(!error);
	}

	/*
	 * Make a transaction-less quota reservation for delayed allocation
	 * blocks.  This number gets adjusted later.  We return if we haven't
	 * allocated blocks already inside this loop.
	 */
	error = xfs_trans_reserve_quota_nblks(NULL, ip, (long)alen, 0,
						XFS_QMOPT_RES_REGBLKS);
	if (error)
		return error;

	/*
	 * Split changing sb for alen and indlen since they could be coming
	 * from different places.
	 */
	indlen = (xfs_extlen_t)xfs_bmap_worst_indlen(ip, alen);
	ASSERT(indlen > 0);

	/* Take the data blocks and the indirect blocks from the free pool. */
	error = xfs_mod_fdblocks(mp, -((int64_t)alen), false);
	if (error)
		goto out_unreserve_quota;

	error = xfs_mod_fdblocks(mp, -((int64_t)indlen), false);
	if (error)
		goto out_unreserve_blocks;


	ip->i_delayed_blks += alen;

	/* Record the delalloc extent; indlen is stashed in the startblock. */
	got->br_startoff = aoff;
	got->br_startblock = nullstartblock(indlen);
	got->br_blockcount = alen;
	got->br_state = XFS_EXT_NORM;

	xfs_bmap_add_extent_hole_delay(ip, whichfork, icur, got);

	/*
	 * Tag the inode if blocks were preallocated. Note that COW fork
	 * preallocation can occur at the start or end of the extent, even when
	 * prealloc == 0, so we must also check the aligned offset and length.
	 */
	if (whichfork == XFS_DATA_FORK && prealloc)
		xfs_inode_set_eofblocks_tag(ip);
	if (whichfork == XFS_COW_FORK && (prealloc || aoff < off || alen > len))
		xfs_inode_set_cowblocks_tag(ip);

	return 0;

out_unreserve_blocks:
	xfs_mod_fdblocks(mp, alen, false);
out_unreserve_quota:
	if (XFS_IS_QUOTA_ON(mp))
		xfs_trans_unreserve_quota_nblks(NULL, ip, (long)alen, 0,
						XFS_QMOPT_RES_REGBLKS);
	return error;
}
4030
Dave Chinnercf11da92014-07-15 07:08:24 +10004031static int
4032xfs_bmapi_allocate(
Dave Chinnere04426b2012-10-05 11:06:59 +10004033 struct xfs_bmalloca *bma)
Dave Chinner7e47a4e2011-09-18 20:40:50 +00004034{
4035 struct xfs_mount *mp = bma->ip->i_mount;
Darrick J. Wong60b49842016-10-03 09:11:34 -07004036 int whichfork = xfs_bmapi_whichfork(bma->flags);
Dave Chinner7e47a4e2011-09-18 20:40:50 +00004037 struct xfs_ifork *ifp = XFS_IFORK_PTR(bma->ip, whichfork);
Christoph Hellwigc315c902011-09-18 20:41:02 +00004038 int tmp_logflags = 0;
Dave Chinner7e47a4e2011-09-18 20:40:50 +00004039 int error;
Dave Chinner7e47a4e2011-09-18 20:40:50 +00004040
Dave Chinnera99ebf42011-12-01 11:24:20 +00004041 ASSERT(bma->length > 0);
4042
Dave Chinner7e47a4e2011-09-18 20:40:50 +00004043 /*
4044 * For the wasdelay case, we could also just allocate the stuff asked
4045 * for in this bmap call but that wouldn't be as good.
4046 */
4047 if (bma->wasdel) {
Dave Chinner963c30c2011-09-18 20:40:59 +00004048 bma->length = (xfs_extlen_t)bma->got.br_blockcount;
4049 bma->offset = bma->got.br_startoff;
Christoph Hellwigb2b17122017-11-03 10:34:43 -07004050 xfs_iext_peek_prev_extent(ifp, &bma->icur, &bma->prev);
Dave Chinner7e47a4e2011-09-18 20:40:50 +00004051 } else {
Dave Chinner963c30c2011-09-18 20:40:59 +00004052 bma->length = XFS_FILBLKS_MIN(bma->length, MAXEXTLEN);
Dave Chinner7e47a4e2011-09-18 20:40:50 +00004053 if (!bma->eof)
Dave Chinner963c30c2011-09-18 20:40:59 +00004054 bma->length = XFS_FILBLKS_MIN(bma->length,
Dave Chinner3a756672011-09-18 20:40:58 +00004055 bma->got.br_startoff - bma->offset);
Dave Chinner7e47a4e2011-09-18 20:40:50 +00004056 }
4057
4058 /*
Dave Chinner292378e2016-09-26 08:21:28 +10004059 * Set the data type being allocated. For the data fork, the first data
4060 * in the file is treated differently to all other allocations. For the
4061 * attribute fork, we only need to ensure the allocated range is not on
4062 * the busy list.
Dave Chinner7e47a4e2011-09-18 20:40:50 +00004063 */
Dave Chinnere04426b2012-10-05 11:06:59 +10004064 if (!(bma->flags & XFS_BMAPI_METADATA)) {
Dave Chinner292378e2016-09-26 08:21:28 +10004065 bma->datatype = XFS_ALLOC_NOBUSY;
4066 if (whichfork == XFS_DATA_FORK) {
4067 if (bma->offset == 0)
4068 bma->datatype |= XFS_ALLOC_INITIAL_USER_DATA;
4069 else
4070 bma->datatype |= XFS_ALLOC_USERDATA;
4071 }
Dave Chinner3fbbbea2015-11-03 12:27:22 +11004072 if (bma->flags & XFS_BMAPI_ZERO)
Dave Chinner292378e2016-09-26 08:21:28 +10004073 bma->datatype |= XFS_ALLOC_USERDATA_ZERO;
Dave Chinner7e47a4e2011-09-18 20:40:50 +00004074 }
4075
Dave Chinnere04426b2012-10-05 11:06:59 +10004076 bma->minlen = (bma->flags & XFS_BMAPI_CONTIG) ? bma->length : 1;
Dave Chinner7e47a4e2011-09-18 20:40:50 +00004077
4078 /*
4079 * Only want to do the alignment at the eof if it is userdata and
4080 * allocation length is larger than a stripe unit.
4081 */
Dave Chinner963c30c2011-09-18 20:40:59 +00004082 if (mp->m_dalign && bma->length >= mp->m_dalign &&
Dave Chinnere04426b2012-10-05 11:06:59 +10004083 !(bma->flags & XFS_BMAPI_METADATA) && whichfork == XFS_DATA_FORK) {
Dave Chinner1b164472011-09-18 20:40:55 +00004084 error = xfs_bmap_isaeof(bma, whichfork);
Dave Chinner7e47a4e2011-09-18 20:40:50 +00004085 if (error)
4086 return error;
4087 }
4088
4089 error = xfs_bmap_alloc(bma);
4090 if (error)
4091 return error;
4092
Dave Chinner29c8d172011-09-18 20:41:00 +00004093 if (bma->cur)
4094 bma->cur->bc_private.b.firstblock = *bma->firstblock;
Dave Chinner963c30c2011-09-18 20:40:59 +00004095 if (bma->blkno == NULLFSBLOCK)
Dave Chinner7e47a4e2011-09-18 20:40:50 +00004096 return 0;
Dave Chinner29c8d172011-09-18 20:41:00 +00004097 if ((ifp->if_flags & XFS_IFBROOT) && !bma->cur) {
4098 bma->cur = xfs_bmbt_init_cursor(mp, bma->tp, bma->ip, whichfork);
4099 bma->cur->bc_private.b.firstblock = *bma->firstblock;
Darrick J. Wong2c3234d2016-08-03 11:19:29 +10004100 bma->cur->bc_private.b.dfops = bma->dfops;
Dave Chinner7e47a4e2011-09-18 20:40:50 +00004101 }
4102 /*
4103 * Bump the number of extents we've allocated
4104 * in this call.
4105 */
Dave Chinnere0c3da52011-09-18 20:41:01 +00004106 bma->nallocs++;
Dave Chinner7e47a4e2011-09-18 20:40:50 +00004107
Dave Chinner29c8d172011-09-18 20:41:00 +00004108 if (bma->cur)
4109 bma->cur->bc_private.b.flags =
Dave Chinner7e47a4e2011-09-18 20:40:50 +00004110 bma->wasdel ? XFS_BTCUR_BPRV_WASDEL : 0;
4111
Dave Chinner963c30c2011-09-18 20:40:59 +00004112 bma->got.br_startoff = bma->offset;
4113 bma->got.br_startblock = bma->blkno;
4114 bma->got.br_blockcount = bma->length;
Dave Chinnerbaf41a52011-09-18 20:40:56 +00004115 bma->got.br_state = XFS_EXT_NORM;
Dave Chinner7e47a4e2011-09-18 20:40:50 +00004116
4117 /*
Darrick J. Wong05a630d2017-02-02 15:14:01 -08004118 * In the data fork, a wasdelay extent has been initialized, so
4119 * shouldn't be flagged as unwritten.
4120 *
4121 * For the cow fork, however, we convert delalloc reservations
4122 * (extents allocated for speculative preallocation) to
4123 * allocated unwritten extents, and only convert the unwritten
4124 * extents to real extents when we're about to write the data.
Dave Chinner7e47a4e2011-09-18 20:40:50 +00004125 */
Darrick J. Wong05a630d2017-02-02 15:14:01 -08004126 if ((!bma->wasdel || (bma->flags & XFS_BMAPI_COWFORK)) &&
4127 (bma->flags & XFS_BMAPI_PREALLOC) &&
Dave Chinner7e47a4e2011-09-18 20:40:50 +00004128 xfs_sb_version_hasextflgbit(&mp->m_sb))
Dave Chinnerbaf41a52011-09-18 20:40:56 +00004129 bma->got.br_state = XFS_EXT_UNWRITTEN;
Dave Chinner7e47a4e2011-09-18 20:40:50 +00004130
Christoph Hellwigc6534242011-09-18 20:41:05 +00004131 if (bma->wasdel)
Darrick J. Wong60b49842016-10-03 09:11:34 -07004132 error = xfs_bmap_add_extent_delay_real(bma, whichfork);
Christoph Hellwigc6534242011-09-18 20:41:05 +00004133 else
Christoph Hellwig6d045582017-04-11 16:45:54 -07004134 error = xfs_bmap_add_extent_hole_real(bma->tp, bma->ip,
Christoph Hellwigb2b17122017-11-03 10:34:43 -07004135 whichfork, &bma->icur, &bma->cur, &bma->got,
Darrick J. Wong95eb3082018-05-09 10:02:32 -07004136 bma->firstblock, bma->dfops, &bma->logflags,
4137 bma->flags);
Christoph Hellwiga5bd606b2011-09-18 20:40:54 +00004138
Christoph Hellwigc315c902011-09-18 20:41:02 +00004139 bma->logflags |= tmp_logflags;
Dave Chinner7e47a4e2011-09-18 20:40:50 +00004140 if (error)
4141 return error;
4142
4143 /*
Christoph Hellwiga5bd606b2011-09-18 20:40:54 +00004144 * Update our extent pointer, given that xfs_bmap_add_extent_delay_real
4145 * or xfs_bmap_add_extent_hole_real might have merged it into one of
4146 * the neighbouring ones.
Dave Chinner7e47a4e2011-09-18 20:40:50 +00004147 */
Christoph Hellwigb2b17122017-11-03 10:34:43 -07004148 xfs_iext_get_extent(ifp, &bma->icur, &bma->got);
Dave Chinner7e47a4e2011-09-18 20:40:50 +00004149
Dave Chinner963c30c2011-09-18 20:40:59 +00004150 ASSERT(bma->got.br_startoff <= bma->offset);
4151 ASSERT(bma->got.br_startoff + bma->got.br_blockcount >=
4152 bma->offset + bma->length);
Dave Chinnerbaf41a52011-09-18 20:40:56 +00004153 ASSERT(bma->got.br_state == XFS_EXT_NORM ||
4154 bma->got.br_state == XFS_EXT_UNWRITTEN);
Dave Chinner7e47a4e2011-09-18 20:40:50 +00004155 return 0;
4156}
4157
/*
 * Convert the extent state of the mapping in @mval if the BMAPI flags ask
 * for it: unwritten -> written when writing (no XFS_BMAPI_PREALLOC), or
 * written -> unwritten when both XFS_BMAPI_PREALLOC and XFS_BMAPI_CONVERT
 * are set.  Returns 0 when no conversion is needed or it succeeded,
 * -EAGAIN when the converted extent grew beyond @len and the caller must
 * retry the mapping, or a negative errno from the btree/zeroing helpers.
 * On return bma->got is refreshed from the extent list, since the real
 * conversion may have merged the extent with a neighbour.
 */
STATIC int
xfs_bmapi_convert_unwritten(
	struct xfs_bmalloca	*bma,
	struct xfs_bmbt_irec	*mval,
	xfs_filblks_t		len,
	int			flags)
{
	int			whichfork = xfs_bmapi_whichfork(flags);
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(bma->ip, whichfork);
	int			tmp_logflags = 0;
	int			error;

	/* check if we need to do unwritten->real conversion */
	if (mval->br_state == XFS_EXT_UNWRITTEN &&
	    (flags & XFS_BMAPI_PREALLOC))
		return 0;

	/* check if we need to do real->unwritten conversion */
	if (mval->br_state == XFS_EXT_NORM &&
	    (flags & (XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT)) !=
			(XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT))
		return 0;

	/*
	 * Modify (by adding) the state flag, if writing.
	 */
	ASSERT(mval->br_blockcount <= len);
	if ((ifp->if_flags & XFS_IFBROOT) && !bma->cur) {
		/* Extent fork is in btree format: we need a cursor. */
		bma->cur = xfs_bmbt_init_cursor(bma->ip->i_mount, bma->tp,
					bma->ip, whichfork);
		bma->cur->bc_private.b.firstblock = *bma->firstblock;
		bma->cur->bc_private.b.dfops = bma->dfops;
	}
	mval->br_state = (mval->br_state == XFS_EXT_UNWRITTEN)
				? XFS_EXT_NORM : XFS_EXT_UNWRITTEN;

	/*
	 * Before insertion into the bmbt, zero the range being converted
	 * if required.
	 */
	if (flags & XFS_BMAPI_ZERO) {
		error = xfs_zero_extent(bma->ip, mval->br_startblock,
					mval->br_blockcount);
		if (error)
			return error;
	}

	error = xfs_bmap_add_extent_unwritten_real(bma->tp, bma->ip, whichfork,
			&bma->icur, &bma->cur, mval, bma->firstblock,
			bma->dfops, &tmp_logflags);
	/*
	 * Log the inode core unconditionally in the unwritten extent conversion
	 * path because the conversion might not have done so (e.g., if the
	 * extent count hasn't changed). We need to make sure the inode is dirty
	 * in the transaction for the sake of fsync(), even if nothing has
	 * changed, because fsync() will not force the log for this transaction
	 * unless it sees the inode pinned.
	 *
	 * Note: If we're only converting cow fork extents, there aren't
	 * any on-disk updates to make, so we don't need to log anything.
	 */
	if (whichfork != XFS_COW_FORK)
		bma->logflags |= tmp_logflags | XFS_ILOG_CORE;
	if (error)
		return error;

	/*
	 * Update our extent pointer, given that
	 * xfs_bmap_add_extent_unwritten_real might have merged it into one
	 * of the neighbouring ones.
	 */
	xfs_iext_get_extent(ifp, &bma->icur, &bma->got);

	/*
	 * We may have combined previously unwritten space with written space,
	 * so generate another request.
	 */
	if (mval->br_blockcount < len)
		return -EAGAIN;
	return 0;
}
4239
/*
 * Map file blocks to filesystem blocks, and allocate blocks or convert the
 * extent state if necessary.  Detailed behaviour is controlled by the flags
 * parameter.  Only allocates blocks from a single allocation group, to avoid
 * locking problems.
 *
 * The returned value in "firstblock" from the first call in a transaction
 * must be remembered and presented to subsequent calls in "firstblock".
 * An upper bound for the number of blocks to be allocated is supplied to
 * the first call in "total"; if no allocation group has that many free
 * blocks then the call will fail (return NULLFSBLOCK in "firstblock").
 *
 * On return *nmap holds the number of mappings written to mval[].
 */
int
xfs_bmapi_write(
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_inode	*ip,		/* incore inode */
	xfs_fileoff_t		bno,		/* starting file offs. mapped */
	xfs_filblks_t		len,		/* length to map in file */
	int			flags,		/* XFS_BMAPI_... */
	xfs_fsblock_t		*firstblock,	/* first allocated block
						   controls a.g. for allocs */
	xfs_extlen_t		total,		/* total blocks needed */
	struct xfs_bmbt_irec	*mval,		/* output: map values */
	int			*nmap,		/* i/o: mval size/count */
	struct xfs_defer_ops	*dfops)		/* i/o: list extents to free */
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_ifork	*ifp;
	struct xfs_bmalloca	bma = { NULL };	/* args for xfs_bmap_alloc */
	xfs_fileoff_t		end;		/* end of mapped file region */
	bool			eof = false;	/* after the end of extents */
	int			error;		/* error return */
	int			n;		/* current extent index */
	xfs_fileoff_t		obno;		/* old block number (offset) */
	int			whichfork;	/* data or attr fork */

#ifdef DEBUG
	/* Saved copies of the arguments for xfs_bmap_validate_ret() below. */
	xfs_fileoff_t		orig_bno;	/* original block number value */
	int			orig_flags;	/* original flags arg value */
	xfs_filblks_t		orig_len;	/* original value of len arg */
	struct xfs_bmbt_irec	*orig_mval;	/* original value of mval */
	int			orig_nmap;	/* original value of *nmap */

	orig_bno = bno;
	orig_len = len;
	orig_flags = flags;
	orig_mval = mval;
	orig_nmap = *nmap;
#endif
	whichfork = xfs_bmapi_whichfork(flags);

	ASSERT(*nmap >= 1);
	ASSERT(*nmap <= XFS_BMAP_MAX_NMAP);
	ASSERT(!(flags & XFS_BMAPI_IGSTATE));
	/* A NULL transaction is only legal for CoW fork conversions. */
	ASSERT(tp != NULL ||
	       (flags & (XFS_BMAPI_CONVERT | XFS_BMAPI_COWFORK)) ==
			(XFS_BMAPI_CONVERT | XFS_BMAPI_COWFORK));
	ASSERT(len > 0);
	ASSERT(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_LOCAL);
	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	ASSERT(!(flags & XFS_BMAPI_REMAP));

	/* zeroing is for currently only for data extents, not metadata */
	ASSERT((flags & (XFS_BMAPI_METADATA | XFS_BMAPI_ZERO)) !=
			(XFS_BMAPI_METADATA | XFS_BMAPI_ZERO));
	/*
	 * we can allocate unwritten extents or pre-zero allocated blocks,
	 * but it makes no sense to do both at once. This would result in
	 * zeroing the unwritten extent twice, but it still being an
	 * unwritten extent....
	 */
	ASSERT((flags & (XFS_BMAPI_PREALLOC | XFS_BMAPI_ZERO)) !=
			(XFS_BMAPI_PREALLOC | XFS_BMAPI_ZERO));

	if (unlikely(XFS_TEST_ERROR(
	    (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
	     XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE),
	     mp, XFS_ERRTAG_BMAPIFORMAT))) {
		XFS_ERROR_REPORT("xfs_bmapi_write", XFS_ERRLEVEL_LOW, mp);
		return -EFSCORRUPTED;
	}

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	ifp = XFS_IFORK_PTR(ip, whichfork);

	XFS_STATS_INC(mp, xs_blk_mapw);

	if (*firstblock == NULLFSBLOCK) {
		/*
		 * First allocation in this transaction: leave room for a
		 * full btree split (one block per level plus the new root).
		 */
		if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE)
			bma.minleft = be16_to_cpu(ifp->if_broot->bb_level) + 1;
		else
			bma.minleft = 1;
	} else {
		bma.minleft = 0;
	}

	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
		error = xfs_iread_extents(tp, ip, whichfork);
		if (error)
			goto error0;
	}

	n = 0;
	end = bno + len;
	obno = bno;

	if (!xfs_iext_lookup_extent(ip, ifp, bno, &bma.icur, &bma.got))
		eof = true;
	if (!xfs_iext_peek_prev_extent(ifp, &bma.icur, &bma.prev))
		bma.prev.br_startoff = NULLFILEOFF;
	bma.tp = tp;
	bma.ip = ip;
	bma.total = total;
	bma.datatype = 0;
	bma.dfops = dfops;
	bma.firstblock = firstblock;

	while (bno < end && n < *nmap) {
		bool			need_alloc = false, wasdelay = false;

		/* in hole or beyond EOF? */
		if (eof || bma.got.br_startoff > bno) {
			/*
			 * CoW fork conversions should /never/ hit EOF or
			 * holes.  There should always be something for us
			 * to work on.
			 */
			ASSERT(!((flags & XFS_BMAPI_CONVERT) &&
			         (flags & XFS_BMAPI_COWFORK)));

			if (flags & XFS_BMAPI_DELALLOC) {
				/*
				 * For the COW fork we can reasonably get a
				 * request for converting an extent that races
				 * with other threads already having converted
				 * part of it, as there converting COW to
				 * regular blocks is not protected using the
				 * IOLOCK.
				 */
				ASSERT(flags & XFS_BMAPI_COWFORK);
				/*
				 * Deliberately redundant with the ASSERT
				 * above: on non-DEBUG builds fail gracefully
				 * instead of proceeding with a bogus request.
				 */
				if (!(flags & XFS_BMAPI_COWFORK)) {
					error = -EIO;
					goto error0;
				}

				if (eof || bno >= end)
					break;
			} else {
				need_alloc = true;
			}
		} else if (isnullstartblock(bma.got.br_startblock)) {
			/* Existing delalloc extent covering bno. */
			wasdelay = true;
		}

		/*
		 * First, deal with the hole before the allocated space
		 * that we found, if any.
		 */
		if ((need_alloc || wasdelay) &&
		    !(flags & XFS_BMAPI_CONVERT_ONLY)) {
			bma.eof = eof;
			bma.conv = !!(flags & XFS_BMAPI_CONVERT);
			bma.wasdel = wasdelay;
			bma.offset = bno;
			bma.flags = flags;

			/*
			 * There's a 32/64 bit type mismatch between the
			 * allocation length request (which can be 64 bits in
			 * length) and the bma length request, which is
			 * xfs_extlen_t and therefore 32 bits. Hence we have to
			 * check for 32-bit overflows and handle them here.
			 */
			if (len > (xfs_filblks_t)MAXEXTLEN)
				bma.length = MAXEXTLEN;
			else
				bma.length = len;

			ASSERT(len > 0);
			ASSERT(bma.length > 0);
			error = xfs_bmapi_allocate(&bma);
			if (error)
				goto error0;
			if (bma.blkno == NULLFSBLOCK)
				break;	/* out of space: return short map */

			/*
			 * If this is a CoW allocation, record the data in
			 * the refcount btree for orphan recovery.
			 */
			if (whichfork == XFS_COW_FORK) {
				error = xfs_refcount_alloc_cow_extent(mp, dfops,
						bma.blkno, bma.length);
				if (error)
					goto error0;
			}
		}

		/* Deal with the allocated space we found.  */
		xfs_bmapi_trim_map(mval, &bma.got, &bno, len, obno,
							end, n, flags);

		/* Execute unwritten extent conversion if necessary */
		error = xfs_bmapi_convert_unwritten(&bma, mval, len, flags);
		if (error == -EAGAIN)
			continue;	/* conversion split; map this range again */
		if (error)
			goto error0;

		/* update the extent map to return */
		xfs_bmapi_update_map(&mval, &bno, &len, obno, end, &n, flags);

		/*
		 * If we're done, stop now.  Stop when we've allocated
		 * XFS_BMAP_MAX_NMAP extents no matter what.  Otherwise
		 * the transaction may get too big.
		 */
		if (bno >= end || n >= *nmap || bma.nallocs >= *nmap)
			break;

		/* Else go on to the next record. */
		bma.prev = bma.got;
		if (!xfs_iext_next_extent(ifp, &bma.icur, &bma.got))
			eof = true;
	}
	*nmap = n;

	/*
	 * Transform from btree to extents, give it cur.
	 */
	if (xfs_bmap_wants_extents(ip, whichfork)) {
		int		tmp_logflags = 0;

		ASSERT(bma.cur);
		error = xfs_bmap_btree_to_extents(tp, ip, bma.cur,
			&tmp_logflags, whichfork);
		bma.logflags |= tmp_logflags;
		if (error)
			goto error0;
	}

	ASSERT(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE ||
	       XFS_IFORK_NEXTENTS(ip, whichfork) >
		XFS_IFORK_MAXEXT(ip, whichfork));
	error = 0;
error0:
	/*
	 * Log everything.  Do this after conversion, there's no point in
	 * logging the extent records if we've converted to btree format.
	 */
	if ((bma.logflags & xfs_ilog_fext(whichfork)) &&
	    XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
		bma.logflags &= ~xfs_ilog_fext(whichfork);
	else if ((bma.logflags & xfs_ilog_fbroot(whichfork)) &&
		 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)
		bma.logflags &= ~xfs_ilog_fbroot(whichfork);
	/*
	 * Log whatever the flags say, even if error.  Otherwise we might miss
	 * detecting a case where the data is changed, there's an error,
	 * and it's not logged so we don't shutdown when we should.
	 */
	if (bma.logflags)
		xfs_trans_log_inode(tp, ip, bma.logflags);

	if (bma.cur) {
		if (!error) {
			/* firstblock must never move backwards across AGs. */
			ASSERT(*firstblock == NULLFSBLOCK ||
			       XFS_FSB_TO_AGNO(mp, *firstblock) <=
			       XFS_FSB_TO_AGNO(mp,
				       bma.cur->bc_private.b.firstblock));
			*firstblock = bma.cur->bc_private.b.firstblock;
		}
		xfs_btree_del_cursor(bma.cur,
			error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
	}
	if (!error)
		xfs_bmap_validate_ret(orig_bno, orig_len, orig_flags, orig_mval,
			orig_nmap, *nmap);
	return error;
}
4522
/*
 * Insert a real extent mapping [bno, bno + len) -> @startblock into the
 * inode's data fork without allocating any new filesystem blocks; the
 * blocks at @startblock are assumed to be already allocated (the mapping
 * must land in a hole -- see the ASSERTs below).  Inode block count and
 * log flags are updated; btree conversion back to extents format is
 * handled if the insert shrank the tree.  Used by the reflink code to
 * remap shared blocks -- TODO confirm callers, none are visible here.
 */
static int
xfs_bmapi_remap(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	xfs_fileoff_t		bno,
	xfs_filblks_t		len,
	xfs_fsblock_t		startblock,
	struct xfs_defer_ops	*dfops)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
	struct xfs_btree_cur	*cur = NULL;
	xfs_fsblock_t		firstblock = NULLFSBLOCK;
	struct xfs_bmbt_irec	got;
	struct xfs_iext_cursor	icur;
	int			logflags = 0, error;

	ASSERT(len > 0);
	ASSERT(len <= (xfs_filblks_t)MAXEXTLEN);
	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

	if (unlikely(XFS_TEST_ERROR(
	    (XFS_IFORK_FORMAT(ip, XFS_DATA_FORK) != XFS_DINODE_FMT_EXTENTS &&
	     XFS_IFORK_FORMAT(ip, XFS_DATA_FORK) != XFS_DINODE_FMT_BTREE),
	     mp, XFS_ERRTAG_BMAPIFORMAT))) {
		XFS_ERROR_REPORT("xfs_bmapi_remap", XFS_ERRLEVEL_LOW, mp);
		return -EFSCORRUPTED;
	}

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
		error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK);
		if (error)
			return error;
	}

	if (xfs_iext_lookup_extent(ip, ifp, bno, &icur, &got)) {
		/* make sure we only reflink into a hole. */
		ASSERT(got.br_startoff > bno);
		ASSERT(got.br_startoff - bno >= len);
	}

	/*
	 * The remapped blocks are now owned by this inode; account them
	 * and log the core before touching the extent list.
	 */
	ip->i_d.di_nblocks += len;
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	if (ifp->if_flags & XFS_IFBROOT) {
		/* Extent list is in btree format: set up a cursor. */
		cur = xfs_bmbt_init_cursor(mp, tp, ip, XFS_DATA_FORK);
		cur->bc_private.b.firstblock = firstblock;
		cur->bc_private.b.dfops = dfops;
		cur->bc_private.b.flags = 0;
	}

	got.br_startoff = bno;
	got.br_startblock = startblock;
	got.br_blockcount = len;
	got.br_state = XFS_EXT_NORM;

	error = xfs_bmap_add_extent_hole_real(tp, ip, XFS_DATA_FORK, &icur,
			&cur, &got, &firstblock, dfops, &logflags, 0);
	if (error)
		goto error0;

	if (xfs_bmap_wants_extents(ip, XFS_DATA_FORK)) {
		int		tmp_logflags = 0;

		error = xfs_bmap_btree_to_extents(tp, ip, cur,
			&tmp_logflags, XFS_DATA_FORK);
		logflags |= tmp_logflags;
	}

error0:
	/* Drop extent/broot log flags that no longer match the fork format. */
	if (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS)
		logflags &= ~XFS_ILOG_DEXT;
	else if (ip->i_d.di_format != XFS_DINODE_FMT_BTREE)
		logflags &= ~XFS_ILOG_DBROOT;

	if (logflags)
		xfs_trans_log_inode(tp, ip, logflags);
	if (cur) {
		xfs_btree_del_cursor(cur,
				error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
	}
	return error;
}
4609
Linus Torvalds1da177e2005-04-16 15:20:36 -07004610/*
Brian Fostera9bd24a2016-03-15 11:42:46 +11004611 * When a delalloc extent is split (e.g., due to a hole punch), the original
4612 * indlen reservation must be shared across the two new extents that are left
4613 * behind.
4614 *
4615 * Given the original reservation and the worst case indlen for the two new
4616 * extents (as calculated by xfs_bmap_worst_indlen()), split the original
Brian Fosterd34999c2016-03-15 11:42:47 +11004617 * reservation fairly across the two new extents. If necessary, steal available
4618 * blocks from a deleted extent to make up a reservation deficiency (e.g., if
4619 * ores == 1). The number of stolen blocks is returned. The availability and
4620 * subsequent accounting of stolen blocks is the responsibility of the caller.
Brian Fostera9bd24a2016-03-15 11:42:46 +11004621 */
Brian Fosterd34999c2016-03-15 11:42:47 +11004622static xfs_filblks_t
Brian Fostera9bd24a2016-03-15 11:42:46 +11004623xfs_bmap_split_indlen(
4624 xfs_filblks_t ores, /* original res. */
4625 xfs_filblks_t *indlen1, /* ext1 worst indlen */
Brian Fosterd34999c2016-03-15 11:42:47 +11004626 xfs_filblks_t *indlen2, /* ext2 worst indlen */
4627 xfs_filblks_t avail) /* stealable blocks */
Brian Fostera9bd24a2016-03-15 11:42:46 +11004628{
4629 xfs_filblks_t len1 = *indlen1;
4630 xfs_filblks_t len2 = *indlen2;
4631 xfs_filblks_t nres = len1 + len2; /* new total res. */
Brian Fosterd34999c2016-03-15 11:42:47 +11004632 xfs_filblks_t stolen = 0;
Brian Foster75d65362017-02-13 22:48:30 -08004633 xfs_filblks_t resfactor;
Brian Fostera9bd24a2016-03-15 11:42:46 +11004634
4635 /*
Brian Fosterd34999c2016-03-15 11:42:47 +11004636 * Steal as many blocks as we can to try and satisfy the worst case
4637 * indlen for both new extents.
4638 */
Brian Foster75d65362017-02-13 22:48:30 -08004639 if (ores < nres && avail)
4640 stolen = XFS_FILBLKS_MIN(nres - ores, avail);
4641 ores += stolen;
4642
4643 /* nothing else to do if we've satisfied the new reservation */
4644 if (ores >= nres)
4645 return stolen;
Brian Fosterd34999c2016-03-15 11:42:47 +11004646
4647 /*
Brian Foster75d65362017-02-13 22:48:30 -08004648 * We can't meet the total required reservation for the two extents.
4649 * Calculate the percent of the overall shortage between both extents
4650 * and apply this percentage to each of the requested indlen values.
4651 * This distributes the shortage fairly and reduces the chances that one
4652 * of the two extents is left with nothing when extents are repeatedly
4653 * split.
Brian Fostera9bd24a2016-03-15 11:42:46 +11004654 */
Brian Foster75d65362017-02-13 22:48:30 -08004655 resfactor = (ores * 100);
4656 do_div(resfactor, nres);
4657 len1 *= resfactor;
4658 do_div(len1, 100);
4659 len2 *= resfactor;
4660 do_div(len2, 100);
4661 ASSERT(len1 + len2 <= ores);
4662 ASSERT(len1 < *indlen1 && len2 < *indlen2);
4663
4664 /*
4665 * Hand out the remainder to each extent. If one of the two reservations
4666 * is zero, we want to make sure that one gets a block first. The loop
4667 * below starts with len1, so hand len2 a block right off the bat if it
4668 * is zero.
4669 */
4670 ores -= (len1 + len2);
4671 ASSERT((*indlen1 - len1) + (*indlen2 - len2) >= ores);
4672 if (ores && !len2 && *indlen2) {
4673 len2++;
4674 ores--;
4675 }
4676 while (ores) {
4677 if (len1 < *indlen1) {
4678 len1++;
4679 ores--;
Brian Fostera9bd24a2016-03-15 11:42:46 +11004680 }
Brian Foster75d65362017-02-13 22:48:30 -08004681 if (!ores)
Brian Fostera9bd24a2016-03-15 11:42:46 +11004682 break;
Brian Foster75d65362017-02-13 22:48:30 -08004683 if (len2 < *indlen2) {
4684 len2++;
4685 ores--;
Brian Fostera9bd24a2016-03-15 11:42:46 +11004686 }
4687 }
4688
4689 *indlen1 = len1;
4690 *indlen2 = len2;
Brian Fosterd34999c2016-03-15 11:42:47 +11004691
4692 return stolen;
Brian Fostera9bd24a2016-03-15 11:42:46 +11004693}
4694
Christoph Hellwigfa5c8362016-10-20 15:54:14 +11004695int
4696xfs_bmap_del_extent_delay(
4697 struct xfs_inode *ip,
4698 int whichfork,
Christoph Hellwigb2b17122017-11-03 10:34:43 -07004699 struct xfs_iext_cursor *icur,
Christoph Hellwigfa5c8362016-10-20 15:54:14 +11004700 struct xfs_bmbt_irec *got,
4701 struct xfs_bmbt_irec *del)
4702{
4703 struct xfs_mount *mp = ip->i_mount;
4704 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
4705 struct xfs_bmbt_irec new;
4706 int64_t da_old, da_new, da_diff = 0;
4707 xfs_fileoff_t del_endoff, got_endoff;
4708 xfs_filblks_t got_indlen, new_indlen, stolen;
Christoph Hellwig060ea652017-10-19 11:02:29 -07004709 int state = xfs_bmap_fork_to_state(whichfork);
4710 int error = 0;
Christoph Hellwigfa5c8362016-10-20 15:54:14 +11004711 bool isrt;
4712
4713 XFS_STATS_INC(mp, xs_del_exlist);
4714
4715 isrt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(ip);
4716 del_endoff = del->br_startoff + del->br_blockcount;
4717 got_endoff = got->br_startoff + got->br_blockcount;
4718 da_old = startblockval(got->br_startblock);
4719 da_new = 0;
4720
Christoph Hellwigfa5c8362016-10-20 15:54:14 +11004721 ASSERT(del->br_blockcount > 0);
4722 ASSERT(got->br_startoff <= del->br_startoff);
4723 ASSERT(got_endoff >= del_endoff);
4724
4725 if (isrt) {
Eric Sandeen4f1adf32017-04-19 15:19:32 -07004726 uint64_t rtexts = XFS_FSB_TO_B(mp, del->br_blockcount);
Christoph Hellwigfa5c8362016-10-20 15:54:14 +11004727
4728 do_div(rtexts, mp->m_sb.sb_rextsize);
4729 xfs_mod_frextents(mp, rtexts);
4730 }
4731
4732 /*
4733 * Update the inode delalloc counter now and wait to update the
4734 * sb counters as we might have to borrow some blocks for the
4735 * indirect block accounting.
4736 */
Darrick J. Wong4fd29ec42016-11-08 11:59:26 +11004737 error = xfs_trans_reserve_quota_nblks(NULL, ip,
4738 -((long)del->br_blockcount), 0,
Christoph Hellwigfa5c8362016-10-20 15:54:14 +11004739 isrt ? XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS);
Darrick J. Wong4fd29ec42016-11-08 11:59:26 +11004740 if (error)
4741 return error;
Christoph Hellwigfa5c8362016-10-20 15:54:14 +11004742 ip->i_delayed_blks -= del->br_blockcount;
4743
Christoph Hellwigfa5c8362016-10-20 15:54:14 +11004744 if (got->br_startoff == del->br_startoff)
Christoph Hellwig0173c682017-10-17 14:16:22 -07004745 state |= BMAP_LEFT_FILLING;
Christoph Hellwigfa5c8362016-10-20 15:54:14 +11004746 if (got_endoff == del_endoff)
Christoph Hellwig0173c682017-10-17 14:16:22 -07004747 state |= BMAP_RIGHT_FILLING;
Christoph Hellwigfa5c8362016-10-20 15:54:14 +11004748
Christoph Hellwig0173c682017-10-17 14:16:22 -07004749 switch (state & (BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING)) {
4750 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
Christoph Hellwigfa5c8362016-10-20 15:54:14 +11004751 /*
4752 * Matches the whole extent. Delete the entry.
4753 */
Christoph Hellwigc38ccf52017-11-03 10:34:47 -07004754 xfs_iext_remove(ip, icur, state);
Christoph Hellwigb2b17122017-11-03 10:34:43 -07004755 xfs_iext_prev(ifp, icur);
Christoph Hellwigfa5c8362016-10-20 15:54:14 +11004756 break;
Christoph Hellwig0173c682017-10-17 14:16:22 -07004757 case BMAP_LEFT_FILLING:
Christoph Hellwigfa5c8362016-10-20 15:54:14 +11004758 /*
4759 * Deleting the first part of the extent.
4760 */
Christoph Hellwigfa5c8362016-10-20 15:54:14 +11004761 got->br_startoff = del_endoff;
4762 got->br_blockcount -= del->br_blockcount;
4763 da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip,
4764 got->br_blockcount), da_old);
4765 got->br_startblock = nullstartblock((int)da_new);
Christoph Hellwigb2b17122017-11-03 10:34:43 -07004766 xfs_iext_update_extent(ip, state, icur, got);
Christoph Hellwigfa5c8362016-10-20 15:54:14 +11004767 break;
Christoph Hellwig0173c682017-10-17 14:16:22 -07004768 case BMAP_RIGHT_FILLING:
Christoph Hellwigfa5c8362016-10-20 15:54:14 +11004769 /*
4770 * Deleting the last part of the extent.
4771 */
Christoph Hellwigfa5c8362016-10-20 15:54:14 +11004772 got->br_blockcount = got->br_blockcount - del->br_blockcount;
4773 da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip,
4774 got->br_blockcount), da_old);
4775 got->br_startblock = nullstartblock((int)da_new);
Christoph Hellwigb2b17122017-11-03 10:34:43 -07004776 xfs_iext_update_extent(ip, state, icur, got);
Christoph Hellwigfa5c8362016-10-20 15:54:14 +11004777 break;
4778 case 0:
4779 /*
4780 * Deleting the middle of the extent.
4781 *
4782 * Distribute the original indlen reservation across the two new
4783 * extents. Steal blocks from the deleted extent if necessary.
4784 * Stealing blocks simply fudges the fdblocks accounting below.
4785 * Warn if either of the new indlen reservations is zero as this
4786 * can lead to delalloc problems.
4787 */
Christoph Hellwigfa5c8362016-10-20 15:54:14 +11004788 got->br_blockcount = del->br_startoff - got->br_startoff;
4789 got_indlen = xfs_bmap_worst_indlen(ip, got->br_blockcount);
4790
4791 new.br_blockcount = got_endoff - del_endoff;
4792 new_indlen = xfs_bmap_worst_indlen(ip, new.br_blockcount);
4793
4794 WARN_ON_ONCE(!got_indlen || !new_indlen);
4795 stolen = xfs_bmap_split_indlen(da_old, &got_indlen, &new_indlen,
4796 del->br_blockcount);
4797
4798 got->br_startblock = nullstartblock((int)got_indlen);
Christoph Hellwigfa5c8362016-10-20 15:54:14 +11004799
4800 new.br_startoff = del_endoff;
4801 new.br_state = got->br_state;
4802 new.br_startblock = nullstartblock((int)new_indlen);
4803
Christoph Hellwigb2b17122017-11-03 10:34:43 -07004804 xfs_iext_update_extent(ip, state, icur, got);
4805 xfs_iext_next(ifp, icur);
Christoph Hellwig0254c2f2017-11-03 10:34:46 -07004806 xfs_iext_insert(ip, icur, &new, state);
Christoph Hellwigfa5c8362016-10-20 15:54:14 +11004807
4808 da_new = got_indlen + new_indlen - stolen;
4809 del->br_blockcount -= stolen;
4810 break;
4811 }
4812
4813 ASSERT(da_old >= da_new);
4814 da_diff = da_old - da_new;
4815 if (!isrt)
4816 da_diff += del->br_blockcount;
4817 if (da_diff)
4818 xfs_mod_fdblocks(mp, da_diff, false);
4819 return error;
4820}
4821
4822void
4823xfs_bmap_del_extent_cow(
4824 struct xfs_inode *ip,
Christoph Hellwigb2b17122017-11-03 10:34:43 -07004825 struct xfs_iext_cursor *icur,
Christoph Hellwigfa5c8362016-10-20 15:54:14 +11004826 struct xfs_bmbt_irec *got,
4827 struct xfs_bmbt_irec *del)
4828{
4829 struct xfs_mount *mp = ip->i_mount;
4830 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK);
4831 struct xfs_bmbt_irec new;
4832 xfs_fileoff_t del_endoff, got_endoff;
4833 int state = BMAP_COWFORK;
4834
4835 XFS_STATS_INC(mp, xs_del_exlist);
4836
4837 del_endoff = del->br_startoff + del->br_blockcount;
4838 got_endoff = got->br_startoff + got->br_blockcount;
4839
Christoph Hellwigfa5c8362016-10-20 15:54:14 +11004840 ASSERT(del->br_blockcount > 0);
4841 ASSERT(got->br_startoff <= del->br_startoff);
4842 ASSERT(got_endoff >= del_endoff);
4843 ASSERT(!isnullstartblock(got->br_startblock));
4844
4845 if (got->br_startoff == del->br_startoff)
Christoph Hellwig0173c682017-10-17 14:16:22 -07004846 state |= BMAP_LEFT_FILLING;
Christoph Hellwigfa5c8362016-10-20 15:54:14 +11004847 if (got_endoff == del_endoff)
Christoph Hellwig0173c682017-10-17 14:16:22 -07004848 state |= BMAP_RIGHT_FILLING;
Christoph Hellwigfa5c8362016-10-20 15:54:14 +11004849
Christoph Hellwig0173c682017-10-17 14:16:22 -07004850 switch (state & (BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING)) {
4851 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
Christoph Hellwigfa5c8362016-10-20 15:54:14 +11004852 /*
4853 * Matches the whole extent. Delete the entry.
4854 */
Christoph Hellwigc38ccf52017-11-03 10:34:47 -07004855 xfs_iext_remove(ip, icur, state);
Christoph Hellwigb2b17122017-11-03 10:34:43 -07004856 xfs_iext_prev(ifp, icur);
Christoph Hellwigfa5c8362016-10-20 15:54:14 +11004857 break;
Christoph Hellwig0173c682017-10-17 14:16:22 -07004858 case BMAP_LEFT_FILLING:
Christoph Hellwigfa5c8362016-10-20 15:54:14 +11004859 /*
4860 * Deleting the first part of the extent.
4861 */
Christoph Hellwigfa5c8362016-10-20 15:54:14 +11004862 got->br_startoff = del_endoff;
4863 got->br_blockcount -= del->br_blockcount;
4864 got->br_startblock = del->br_startblock + del->br_blockcount;
Christoph Hellwigb2b17122017-11-03 10:34:43 -07004865 xfs_iext_update_extent(ip, state, icur, got);
Christoph Hellwigfa5c8362016-10-20 15:54:14 +11004866 break;
Christoph Hellwig0173c682017-10-17 14:16:22 -07004867 case BMAP_RIGHT_FILLING:
Christoph Hellwigfa5c8362016-10-20 15:54:14 +11004868 /*
4869 * Deleting the last part of the extent.
4870 */
Christoph Hellwigfa5c8362016-10-20 15:54:14 +11004871 got->br_blockcount -= del->br_blockcount;
Christoph Hellwigb2b17122017-11-03 10:34:43 -07004872 xfs_iext_update_extent(ip, state, icur, got);
Christoph Hellwigfa5c8362016-10-20 15:54:14 +11004873 break;
4874 case 0:
4875 /*
4876 * Deleting the middle of the extent.
4877 */
Christoph Hellwigfa5c8362016-10-20 15:54:14 +11004878 got->br_blockcount = del->br_startoff - got->br_startoff;
Christoph Hellwigfa5c8362016-10-20 15:54:14 +11004879
4880 new.br_startoff = del_endoff;
4881 new.br_blockcount = got_endoff - del_endoff;
4882 new.br_state = got->br_state;
4883 new.br_startblock = del->br_startblock + del->br_blockcount;
4884
Christoph Hellwigb2b17122017-11-03 10:34:43 -07004885 xfs_iext_update_extent(ip, state, icur, got);
4886 xfs_iext_next(ifp, icur);
Christoph Hellwig0254c2f2017-11-03 10:34:46 -07004887 xfs_iext_insert(ip, icur, &new, state);
Christoph Hellwigfa5c8362016-10-20 15:54:14 +11004888 break;
4889 }
Darrick J. Wong4b4c1322018-01-19 09:05:48 -08004890 ip->i_delayed_blks -= del->br_blockcount;
Christoph Hellwigfa5c8362016-10-20 15:54:14 +11004891}
4892
Brian Fostera9bd24a2016-03-15 11:42:46 +11004893/*
Dave Chinner9e5987a72013-02-25 12:31:26 +11004894 * Called by xfs_bmapi to update file extent records and the btree
Christoph Hellwige1d75532017-10-17 14:16:21 -07004895 * after removing space.
Dave Chinner9e5987a72013-02-25 12:31:26 +11004896 */
4897STATIC int /* error */
Christoph Hellwige1d75532017-10-17 14:16:21 -07004898xfs_bmap_del_extent_real(
Dave Chinner9e5987a72013-02-25 12:31:26 +11004899 xfs_inode_t *ip, /* incore inode pointer */
4900 xfs_trans_t *tp, /* current transaction pointer */
Christoph Hellwigb2b17122017-11-03 10:34:43 -07004901 struct xfs_iext_cursor *icur,
Darrick J. Wong2c3234d2016-08-03 11:19:29 +10004902 struct xfs_defer_ops *dfops, /* list of extents to be freed */
Dave Chinner9e5987a72013-02-25 12:31:26 +11004903 xfs_btree_cur_t *cur, /* if null, not a btree */
4904 xfs_bmbt_irec_t *del, /* data to remove from extents */
4905 int *logflagsp, /* inode logging flags */
Darrick J. Wong4847acf2016-10-03 09:11:27 -07004906 int whichfork, /* data or attr fork */
4907 int bflags) /* bmapi flags */
Dave Chinner9e5987a72013-02-25 12:31:26 +11004908{
Dave Chinner9e5987a72013-02-25 12:31:26 +11004909 xfs_fsblock_t del_endblock=0; /* first block past del */
4910 xfs_fileoff_t del_endoff; /* first offset past del */
Dave Chinner9e5987a72013-02-25 12:31:26 +11004911 int do_fx; /* free extent at end of routine */
Dave Chinner9e5987a72013-02-25 12:31:26 +11004912 int error; /* error return value */
Christoph Hellwig1b24b632017-10-17 14:16:22 -07004913 int flags = 0;/* inode logging flags */
Christoph Hellwig48fd52b2017-10-17 14:16:23 -07004914 struct xfs_bmbt_irec got; /* current extent entry */
Dave Chinner9e5987a72013-02-25 12:31:26 +11004915 xfs_fileoff_t got_endoff; /* first offset past got */
4916 int i; /* temp state */
4917 xfs_ifork_t *ifp; /* inode fork pointer */
4918 xfs_mount_t *mp; /* mount structure */
4919 xfs_filblks_t nblks; /* quota/sb block count */
4920 xfs_bmbt_irec_t new; /* new record to be inserted */
4921 /* REFERENCED */
4922 uint qfield; /* quota field to update */
Christoph Hellwig060ea652017-10-19 11:02:29 -07004923 int state = xfs_bmap_fork_to_state(whichfork);
Christoph Hellwig48fd52b2017-10-17 14:16:23 -07004924 struct xfs_bmbt_irec old;
Dave Chinner9e5987a72013-02-25 12:31:26 +11004925
Bill O'Donnellff6d6af2015-10-12 18:21:22 +11004926 mp = ip->i_mount;
4927 XFS_STATS_INC(mp, xs_del_exlist);
Dave Chinner9e5987a72013-02-25 12:31:26 +11004928
Dave Chinner9e5987a72013-02-25 12:31:26 +11004929 ifp = XFS_IFORK_PTR(ip, whichfork);
Dave Chinner9e5987a72013-02-25 12:31:26 +11004930 ASSERT(del->br_blockcount > 0);
Christoph Hellwigb2b17122017-11-03 10:34:43 -07004931 xfs_iext_get_extent(ifp, icur, &got);
Dave Chinner9e5987a72013-02-25 12:31:26 +11004932 ASSERT(got.br_startoff <= del->br_startoff);
4933 del_endoff = del->br_startoff + del->br_blockcount;
4934 got_endoff = got.br_startoff + got.br_blockcount;
4935 ASSERT(got_endoff >= del_endoff);
Christoph Hellwige1d75532017-10-17 14:16:21 -07004936 ASSERT(!isnullstartblock(got.br_startblock));
Dave Chinner9e5987a72013-02-25 12:31:26 +11004937 qfield = 0;
4938 error = 0;
Dave Chinner9e5987a72013-02-25 12:31:26 +11004939
Christoph Hellwig1b24b632017-10-17 14:16:22 -07004940 /*
4941 * If it's the case where the directory code is running with no block
4942 * reservation, and the deleted block is in the middle of its extent,
4943 * and the resulting insert of an extent would cause transformation to
4944 * btree format, then reject it. The calling code will then swap blocks
4945 * around instead. We have to do this now, rather than waiting for the
4946 * conversion to btree format, since the transaction will be dirty then.
4947 */
4948 if (tp->t_blk_res == 0 &&
4949 XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS &&
4950 XFS_IFORK_NEXTENTS(ip, whichfork) >=
4951 XFS_IFORK_MAXEXT(ip, whichfork) &&
4952 del->br_startoff > got.br_startoff && del_endoff < got_endoff)
4953 return -ENOSPC;
4954
4955 flags = XFS_ILOG_CORE;
Christoph Hellwige1d75532017-10-17 14:16:21 -07004956 if (whichfork == XFS_DATA_FORK && XFS_IS_REALTIME_INODE(ip)) {
4957 xfs_fsblock_t bno;
4958 xfs_filblks_t len;
4959
4960 ASSERT(do_mod(del->br_blockcount, mp->m_sb.sb_rextsize) == 0);
4961 ASSERT(do_mod(del->br_startblock, mp->m_sb.sb_rextsize) == 0);
4962 bno = del->br_startblock;
4963 len = del->br_blockcount;
4964 do_div(bno, mp->m_sb.sb_rextsize);
4965 do_div(len, mp->m_sb.sb_rextsize);
4966 error = xfs_rtfree_extent(tp, bno, (xfs_extlen_t)len);
4967 if (error)
4968 goto done;
Dave Chinner9e5987a72013-02-25 12:31:26 +11004969 do_fx = 0;
Christoph Hellwige1d75532017-10-17 14:16:21 -07004970 nblks = len * mp->m_sb.sb_rextsize;
4971 qfield = XFS_TRANS_DQ_RTBCOUNT;
4972 } else {
4973 do_fx = 1;
4974 nblks = del->br_blockcount;
4975 qfield = XFS_TRANS_DQ_BCOUNT;
4976 }
4977
4978 del_endblock = del->br_startblock + del->br_blockcount;
4979 if (cur) {
Christoph Hellwige16cf9b2017-10-17 14:16:26 -07004980 error = xfs_bmbt_lookup_eq(cur, &got, &i);
Christoph Hellwige1d75532017-10-17 14:16:21 -07004981 if (error)
4982 goto done;
4983 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
Dave Chinner9e5987a72013-02-25 12:31:26 +11004984 }
Darrick J. Wong340785c2016-08-03 11:33:42 +10004985
Christoph Hellwig491f6f8a2017-10-17 14:16:23 -07004986 if (got.br_startoff == del->br_startoff)
4987 state |= BMAP_LEFT_FILLING;
4988 if (got_endoff == del_endoff)
4989 state |= BMAP_RIGHT_FILLING;
4990
4991 switch (state & (BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING)) {
4992 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
Dave Chinner9e5987a72013-02-25 12:31:26 +11004993 /*
4994 * Matches the whole extent. Delete the entry.
4995 */
Christoph Hellwigc38ccf52017-11-03 10:34:47 -07004996 xfs_iext_remove(ip, icur, state);
Christoph Hellwigb2b17122017-11-03 10:34:43 -07004997 xfs_iext_prev(ifp, icur);
Dave Chinner9e5987a72013-02-25 12:31:26 +11004998 XFS_IFORK_NEXT_SET(ip, whichfork,
4999 XFS_IFORK_NEXTENTS(ip, whichfork) - 1);
5000 flags |= XFS_ILOG_CORE;
5001 if (!cur) {
5002 flags |= xfs_ilog_fext(whichfork);
5003 break;
5004 }
5005 if ((error = xfs_btree_delete(cur, &i)))
5006 goto done;
Eric Sandeenc29aad42015-02-23 22:39:08 +11005007 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
Dave Chinner9e5987a72013-02-25 12:31:26 +11005008 break;
Christoph Hellwig491f6f8a2017-10-17 14:16:23 -07005009 case BMAP_LEFT_FILLING:
Dave Chinner9e5987a72013-02-25 12:31:26 +11005010 /*
5011 * Deleting the first part of the extent.
5012 */
Christoph Hellwig48fd52b2017-10-17 14:16:23 -07005013 got.br_startoff = del_endoff;
5014 got.br_startblock = del_endblock;
5015 got.br_blockcount -= del->br_blockcount;
Christoph Hellwigb2b17122017-11-03 10:34:43 -07005016 xfs_iext_update_extent(ip, state, icur, &got);
Dave Chinner9e5987a72013-02-25 12:31:26 +11005017 if (!cur) {
5018 flags |= xfs_ilog_fext(whichfork);
5019 break;
5020 }
Christoph Hellwiga67d00a2017-10-17 14:16:26 -07005021 error = xfs_bmbt_update(cur, &got);
Christoph Hellwig48fd52b2017-10-17 14:16:23 -07005022 if (error)
Dave Chinner9e5987a72013-02-25 12:31:26 +11005023 goto done;
5024 break;
Christoph Hellwig491f6f8a2017-10-17 14:16:23 -07005025 case BMAP_RIGHT_FILLING:
Dave Chinner9e5987a72013-02-25 12:31:26 +11005026 /*
5027 * Deleting the last part of the extent.
5028 */
Christoph Hellwig48fd52b2017-10-17 14:16:23 -07005029 got.br_blockcount -= del->br_blockcount;
Christoph Hellwigb2b17122017-11-03 10:34:43 -07005030 xfs_iext_update_extent(ip, state, icur, &got);
Dave Chinner9e5987a72013-02-25 12:31:26 +11005031 if (!cur) {
5032 flags |= xfs_ilog_fext(whichfork);
5033 break;
5034 }
Christoph Hellwiga67d00a2017-10-17 14:16:26 -07005035 error = xfs_bmbt_update(cur, &got);
Christoph Hellwig48fd52b2017-10-17 14:16:23 -07005036 if (error)
Dave Chinner9e5987a72013-02-25 12:31:26 +11005037 goto done;
5038 break;
Dave Chinner9e5987a72013-02-25 12:31:26 +11005039 case 0:
5040 /*
5041 * Deleting the middle of the extent.
5042 */
Christoph Hellwig48fd52b2017-10-17 14:16:23 -07005043 old = got;
Christoph Hellwigca5d8e5b2017-10-19 11:04:44 -07005044
Christoph Hellwig48fd52b2017-10-17 14:16:23 -07005045 got.br_blockcount = del->br_startoff - got.br_startoff;
Christoph Hellwigb2b17122017-11-03 10:34:43 -07005046 xfs_iext_update_extent(ip, state, icur, &got);
Christoph Hellwig48fd52b2017-10-17 14:16:23 -07005047
Dave Chinner9e5987a72013-02-25 12:31:26 +11005048 new.br_startoff = del_endoff;
Christoph Hellwig48fd52b2017-10-17 14:16:23 -07005049 new.br_blockcount = got_endoff - del_endoff;
Dave Chinner9e5987a72013-02-25 12:31:26 +11005050 new.br_state = got.br_state;
Christoph Hellwige1d75532017-10-17 14:16:21 -07005051 new.br_startblock = del_endblock;
Christoph Hellwig48fd52b2017-10-17 14:16:23 -07005052
Christoph Hellwige1d75532017-10-17 14:16:21 -07005053 flags |= XFS_ILOG_CORE;
5054 if (cur) {
Christoph Hellwiga67d00a2017-10-17 14:16:26 -07005055 error = xfs_bmbt_update(cur, &got);
Christoph Hellwige1d75532017-10-17 14:16:21 -07005056 if (error)
5057 goto done;
5058 error = xfs_btree_increment(cur, 0, &i);
5059 if (error)
5060 goto done;
5061 cur->bc_rec.b = new;
5062 error = xfs_btree_insert(cur, &i);
5063 if (error && error != -ENOSPC)
5064 goto done;
5065 /*
5066 * If get no-space back from btree insert, it tried a
5067 * split, and we have a zero block reservation. Fix up
5068 * our state and return the error.
5069 */
5070 if (error == -ENOSPC) {
5071 /*
5072 * Reset the cursor, don't trust it after any
5073 * insert operation.
5074 */
Christoph Hellwige16cf9b2017-10-17 14:16:26 -07005075 error = xfs_bmbt_lookup_eq(cur, &got, &i);
Christoph Hellwige1d75532017-10-17 14:16:21 -07005076 if (error)
Dave Chinner9e5987a72013-02-25 12:31:26 +11005077 goto done;
Christoph Hellwige1d75532017-10-17 14:16:21 -07005078 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
5079 /*
5080 * Update the btree record back
5081 * to the original value.
5082 */
Christoph Hellwiga67d00a2017-10-17 14:16:26 -07005083 error = xfs_bmbt_update(cur, &old);
Christoph Hellwige1d75532017-10-17 14:16:21 -07005084 if (error)
Dave Chinner9e5987a72013-02-25 12:31:26 +11005085 goto done;
5086 /*
Christoph Hellwige1d75532017-10-17 14:16:21 -07005087 * Reset the extent record back
5088 * to the original value.
Dave Chinner9e5987a72013-02-25 12:31:26 +11005089 */
Christoph Hellwigb2b17122017-11-03 10:34:43 -07005090 xfs_iext_update_extent(ip, state, icur, &old);
Christoph Hellwige1d75532017-10-17 14:16:21 -07005091 flags = 0;
5092 error = -ENOSPC;
5093 goto done;
5094 }
5095 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
5096 } else
5097 flags |= xfs_ilog_fext(whichfork);
5098 XFS_IFORK_NEXT_SET(ip, whichfork,
5099 XFS_IFORK_NEXTENTS(ip, whichfork) + 1);
Christoph Hellwigb2b17122017-11-03 10:34:43 -07005100 xfs_iext_next(ifp, icur);
Christoph Hellwig0254c2f2017-11-03 10:34:46 -07005101 xfs_iext_insert(ip, icur, &new, state);
Dave Chinner9e5987a72013-02-25 12:31:26 +11005102 break;
5103 }
Darrick J. Wong9c194642016-08-03 12:16:05 +10005104
5105 /* remove reverse mapping */
Christoph Hellwige1d75532017-10-17 14:16:21 -07005106 error = xfs_rmap_unmap_extent(mp, dfops, ip, whichfork, del);
5107 if (error)
5108 goto done;
Darrick J. Wong9c194642016-08-03 12:16:05 +10005109
Dave Chinner9e5987a72013-02-25 12:31:26 +11005110 /*
5111 * If we need to, add to list of extents to delete.
5112 */
Darrick J. Wong4847acf2016-10-03 09:11:27 -07005113 if (do_fx && !(bflags & XFS_BMAPI_REMAP)) {
Darrick J. Wong62aab202016-10-03 09:11:23 -07005114 if (xfs_is_reflink_inode(ip) && whichfork == XFS_DATA_FORK) {
5115 error = xfs_refcount_decrease_extent(mp, dfops, del);
5116 if (error)
5117 goto done;
Brian Fosterfcb762f2018-05-09 08:45:04 -07005118 } else {
Brian Foster4e529332018-05-10 09:35:42 -07005119 __xfs_bmap_add_free(mp, dfops, del->br_startblock,
5120 del->br_blockcount, NULL,
5121 (bflags & XFS_BMAPI_NODISCARD) ||
5122 del->br_state == XFS_EXT_UNWRITTEN);
Brian Fosterfcb762f2018-05-09 08:45:04 -07005123 }
Darrick J. Wong62aab202016-10-03 09:11:23 -07005124 }
5125
Dave Chinner9e5987a72013-02-25 12:31:26 +11005126 /*
5127 * Adjust inode # blocks in the file.
5128 */
5129 if (nblks)
5130 ip->i_d.di_nblocks -= nblks;
5131 /*
5132 * Adjust quota data.
5133 */
Darrick J. Wong4847acf2016-10-03 09:11:27 -07005134 if (qfield && !(bflags & XFS_BMAPI_REMAP))
Dave Chinner9e5987a72013-02-25 12:31:26 +11005135 xfs_trans_mod_dquot_byino(tp, ip, qfield, (long)-nblks);
5136
Dave Chinner9e5987a72013-02-25 12:31:26 +11005137done:
5138 *logflagsp = flags;
5139 return error;
5140}
5141
5142/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07005143 * Unmap (remove) blocks from a file.
5144 * If nexts is nonzero then the number of extents to remove is limited to
5145 * that value. If not all extents in the block range can be removed then
5146 * *done is set.
5147 */
5148int /* error */
Darrick J. Wong44535932016-10-03 09:11:29 -07005149__xfs_bunmapi(
Linus Torvalds1da177e2005-04-16 15:20:36 -07005150 xfs_trans_t *tp, /* transaction pointer */
5151 struct xfs_inode *ip, /* incore inode */
Christoph Hellwig8280f6e2017-10-17 14:16:21 -07005152 xfs_fileoff_t start, /* first file offset deleted */
Darrick J. Wong44535932016-10-03 09:11:29 -07005153 xfs_filblks_t *rlen, /* i/o: amount remaining */
Linus Torvalds1da177e2005-04-16 15:20:36 -07005154 int flags, /* misc flags */
5155 xfs_extnum_t nexts, /* number of extents max */
5156 xfs_fsblock_t *firstblock, /* first allocated block
5157 controls a.g. for allocs */
Darrick J. Wong44535932016-10-03 09:11:29 -07005158 struct xfs_defer_ops *dfops) /* i/o: deferred updates */
Linus Torvalds1da177e2005-04-16 15:20:36 -07005159{
5160 xfs_btree_cur_t *cur; /* bmap btree cursor */
5161 xfs_bmbt_irec_t del; /* extent being deleted */
Linus Torvalds1da177e2005-04-16 15:20:36 -07005162 int error; /* error return value */
5163 xfs_extnum_t extno; /* extent number in list */
Mandy Kirkconnell4eea22f2006-03-14 13:29:52 +11005164 xfs_bmbt_irec_t got; /* current extent record */
Linus Torvalds1da177e2005-04-16 15:20:36 -07005165 xfs_ifork_t *ifp; /* inode fork pointer */
5166 int isrt; /* freeing in rt area */
Linus Torvalds1da177e2005-04-16 15:20:36 -07005167 int logflags; /* transaction logging flags */
5168 xfs_extlen_t mod; /* rt extent offset */
5169 xfs_mount_t *mp; /* mount structure */
Linus Torvalds1da177e2005-04-16 15:20:36 -07005170 int tmp_logflags; /* partial logging flags */
5171 int wasdel; /* was a delayed alloc extent */
5172 int whichfork; /* data or attribute fork */
Linus Torvalds1da177e2005-04-16 15:20:36 -07005173 xfs_fsblock_t sum;
Darrick J. Wong44535932016-10-03 09:11:29 -07005174 xfs_filblks_t len = *rlen; /* length to unmap in file */
Darrick J. Wonge1a4e372017-06-14 21:25:57 -07005175 xfs_fileoff_t max_len;
Christoph Hellwig5b094d62017-07-18 11:16:51 -07005176 xfs_agnumber_t prev_agno = NULLAGNUMBER, agno;
Christoph Hellwig8280f6e2017-10-17 14:16:21 -07005177 xfs_fileoff_t end;
Christoph Hellwigb2b17122017-11-03 10:34:43 -07005178 struct xfs_iext_cursor icur;
5179 bool done = false;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005180
Christoph Hellwig8280f6e2017-10-17 14:16:21 -07005181 trace_xfs_bunmap(ip, start, len, flags, _RET_IP_);
Christoph Hellwig0b1b2132009-12-14 23:14:59 +00005182
Darrick J. Wong3993bae2016-10-03 09:11:32 -07005183 whichfork = xfs_bmapi_whichfork(flags);
5184 ASSERT(whichfork != XFS_COW_FORK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005185 ifp = XFS_IFORK_PTR(ip, whichfork);
5186 if (unlikely(
5187 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
5188 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)) {
5189 XFS_ERROR_REPORT("xfs_bunmapi", XFS_ERRLEVEL_LOW,
5190 ip->i_mount);
Dave Chinner24513372014-06-25 14:58:08 +10005191 return -EFSCORRUPTED;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005192 }
5193 mp = ip->i_mount;
5194 if (XFS_FORCED_SHUTDOWN(mp))
Dave Chinner24513372014-06-25 14:58:08 +10005195 return -EIO;
Christoph Hellwig54893272011-05-11 15:04:03 +00005196
Christoph Hellwigeef334e2013-12-06 12:30:17 -08005197 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
Linus Torvalds1da177e2005-04-16 15:20:36 -07005198 ASSERT(len > 0);
5199 ASSERT(nexts >= 0);
Christoph Hellwig8096b1e2011-12-18 20:00:07 +00005200
Darrick J. Wonge1a4e372017-06-14 21:25:57 -07005201 /*
5202 * Guesstimate how many blocks we can unmap without running the risk of
5203 * blowing out the transaction with a mix of EFIs and reflink
5204 * adjustments.
5205 */
Darrick J. Wong8c57b882017-12-10 18:03:53 -08005206 if (tp && xfs_is_reflink_inode(ip) && whichfork == XFS_DATA_FORK)
Darrick J. Wonge1a4e372017-06-14 21:25:57 -07005207 max_len = min(len, xfs_refcount_max_unmap(tp->t_log_res));
5208 else
5209 max_len = len;
5210
Linus Torvalds1da177e2005-04-16 15:20:36 -07005211 if (!(ifp->if_flags & XFS_IFEXTENTS) &&
5212 (error = xfs_iread_extents(tp, ip, whichfork)))
5213 return error;
Eric Sandeen5d829302016-11-08 12:59:42 +11005214 if (xfs_iext_count(ifp) == 0) {
Darrick J. Wong44535932016-10-03 09:11:29 -07005215 *rlen = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005216 return 0;
5217 }
Bill O'Donnellff6d6af2015-10-12 18:21:22 +11005218 XFS_STATS_INC(mp, xs_blk_unmap);
Nathan Scottdd9f4382006-01-11 15:28:28 +11005219 isrt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(ip);
Christoph Hellwigdc560152017-10-23 16:32:39 -07005220 end = start + len;
Christoph Hellwigb4e91812010-06-23 18:11:15 +10005221
Christoph Hellwigb2b17122017-11-03 10:34:43 -07005222 if (!xfs_iext_lookup_extent_before(ip, ifp, &end, &icur, &got)) {
Christoph Hellwigdc560152017-10-23 16:32:39 -07005223 *rlen = 0;
5224 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005225 }
Christoph Hellwigdc560152017-10-23 16:32:39 -07005226 end--;
Christoph Hellwig7efc7942016-11-24 11:39:44 +11005227
Linus Torvalds1da177e2005-04-16 15:20:36 -07005228 logflags = 0;
5229 if (ifp->if_flags & XFS_IFBROOT) {
5230 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE);
Christoph Hellwig561f7d12008-10-30 16:53:59 +11005231 cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005232 cur->bc_private.b.firstblock = *firstblock;
Darrick J. Wong2c3234d2016-08-03 11:19:29 +10005233 cur->bc_private.b.dfops = dfops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005234 cur->bc_private.b.flags = 0;
5235 } else
5236 cur = NULL;
Kamal Dasu5575acc2012-02-23 00:41:39 +00005237
5238 if (isrt) {
5239 /*
5240 * Synchronize by locking the bitmap inode.
5241 */
Darrick J. Wongf4a06602016-08-03 11:00:42 +10005242 xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL|XFS_ILOCK_RTBITMAP);
Kamal Dasu5575acc2012-02-23 00:41:39 +00005243 xfs_trans_ijoin(tp, mp->m_rbmip, XFS_ILOCK_EXCL);
Darrick J. Wongf4a06602016-08-03 11:00:42 +10005244 xfs_ilock(mp->m_rsumip, XFS_ILOCK_EXCL|XFS_ILOCK_RTSUM);
5245 xfs_trans_ijoin(tp, mp->m_rsumip, XFS_ILOCK_EXCL);
Kamal Dasu5575acc2012-02-23 00:41:39 +00005246 }
5247
Linus Torvalds1da177e2005-04-16 15:20:36 -07005248 extno = 0;
Christoph Hellwigb2b17122017-11-03 10:34:43 -07005249 while (end != (xfs_fileoff_t)-1 && end >= start &&
Darrick J. Wonge1a4e372017-06-14 21:25:57 -07005250 (nexts == 0 || extno < nexts) && max_len > 0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005251 /*
Christoph Hellwig8280f6e2017-10-17 14:16:21 -07005252 * Is the found extent after a hole in which end lives?
Linus Torvalds1da177e2005-04-16 15:20:36 -07005253 * Just back up to the previous extent, if so.
5254 */
Christoph Hellwigb2b17122017-11-03 10:34:43 -07005255 if (got.br_startoff > end &&
5256 !xfs_iext_prev_extent(ifp, &icur, &got)) {
5257 done = true;
5258 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005259 }
5260 /*
5261 * Is the last block of this extent before the range
5262 * we're supposed to delete? If so, we're done.
5263 */
Christoph Hellwig8280f6e2017-10-17 14:16:21 -07005264 end = XFS_FILEOFF_MIN(end,
Linus Torvalds1da177e2005-04-16 15:20:36 -07005265 got.br_startoff + got.br_blockcount - 1);
Christoph Hellwig8280f6e2017-10-17 14:16:21 -07005266 if (end < start)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005267 break;
5268 /*
5269 * Then deal with the (possibly delayed) allocated space
5270 * we found.
5271 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07005272 del = got;
Eric Sandeen9d87c312009-01-14 23:22:07 -06005273 wasdel = isnullstartblock(del.br_startblock);
Christoph Hellwig5b094d62017-07-18 11:16:51 -07005274
5275 /*
5276 * Make sure we don't touch multiple AGF headers out of order
5277 * in a single transaction, as that could cause AB-BA deadlocks.
5278 */
5279 if (!wasdel) {
5280 agno = XFS_FSB_TO_AGNO(mp, del.br_startblock);
5281 if (prev_agno != NULLAGNUMBER && prev_agno > agno)
5282 break;
5283 prev_agno = agno;
5284 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005285 if (got.br_startoff < start) {
5286 del.br_startoff = start;
5287 del.br_blockcount -= start - got.br_startoff;
5288 if (!wasdel)
5289 del.br_startblock += start - got.br_startoff;
5290 }
Christoph Hellwig8280f6e2017-10-17 14:16:21 -07005291 if (del.br_startoff + del.br_blockcount > end + 1)
5292 del.br_blockcount = end + 1 - del.br_startoff;
Darrick J. Wonge1a4e372017-06-14 21:25:57 -07005293
5294 /* How much can we safely unmap? */
5295 if (max_len < del.br_blockcount) {
5296 del.br_startoff += del.br_blockcount - max_len;
5297 if (!wasdel)
5298 del.br_startblock += del.br_blockcount - max_len;
5299 del.br_blockcount = max_len;
5300 }
5301
Linus Torvalds1da177e2005-04-16 15:20:36 -07005302 sum = del.br_startblock + del.br_blockcount;
5303 if (isrt &&
5304 (mod = do_mod(sum, mp->m_sb.sb_rextsize))) {
5305 /*
5306 * Realtime extent not lined up at the end.
5307 * The extent could have been split into written
5308 * and unwritten pieces, or we could just be
5309 * unmapping part of it. But we can't really
5310 * get rid of part of a realtime extent.
5311 */
5312 if (del.br_state == XFS_EXT_UNWRITTEN ||
Eric Sandeen62118702008-03-06 13:44:28 +11005313 !xfs_sb_version_hasextflgbit(&mp->m_sb)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005314 /*
5315 * This piece is unwritten, or we're not
5316 * using unwritten extents. Skip over it.
5317 */
Christoph Hellwig8280f6e2017-10-17 14:16:21 -07005318 ASSERT(end >= mod);
5319 end -= mod > del.br_blockcount ?
Linus Torvalds1da177e2005-04-16 15:20:36 -07005320 del.br_blockcount : mod;
Christoph Hellwigb2b17122017-11-03 10:34:43 -07005321 if (end < got.br_startoff &&
5322 !xfs_iext_prev_extent(ifp, &icur, &got)) {
5323 done = true;
5324 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005325 }
5326 continue;
5327 }
5328 /*
5329 * It's written, turn it unwritten.
5330 * This is better than zeroing it.
5331 */
5332 ASSERT(del.br_state == XFS_EXT_NORM);
Christoph Hellwiga7e5d032016-03-02 09:58:21 +11005333 ASSERT(tp->t_blk_res > 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005334 /*
5335 * If this spans a realtime extent boundary,
5336 * chop it back to the start of the one we end at.
5337 */
5338 if (del.br_blockcount > mod) {
5339 del.br_startoff += del.br_blockcount - mod;
5340 del.br_startblock += del.br_blockcount - mod;
5341 del.br_blockcount = mod;
5342 }
5343 del.br_state = XFS_EXT_UNWRITTEN;
Christoph Hellwiga5bd606b2011-09-18 20:40:54 +00005344 error = xfs_bmap_add_extent_unwritten_real(tp, ip,
Christoph Hellwigb2b17122017-11-03 10:34:43 -07005345 whichfork, &icur, &cur, &del,
Darrick J. Wong05a630d2017-02-02 15:14:01 -08005346 firstblock, dfops, &logflags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005347 if (error)
5348 goto error0;
5349 goto nodelete;
5350 }
5351 if (isrt && (mod = do_mod(del.br_startblock, mp->m_sb.sb_rextsize))) {
5352 /*
5353 * Realtime extent is lined up at the end but not
5354 * at the front. We'll get rid of full extents if
5355 * we can.
5356 */
5357 mod = mp->m_sb.sb_rextsize - mod;
5358 if (del.br_blockcount > mod) {
5359 del.br_blockcount -= mod;
5360 del.br_startoff += mod;
5361 del.br_startblock += mod;
5362 } else if ((del.br_startoff == start &&
5363 (del.br_state == XFS_EXT_UNWRITTEN ||
Christoph Hellwiga7e5d032016-03-02 09:58:21 +11005364 tp->t_blk_res == 0)) ||
Eric Sandeen62118702008-03-06 13:44:28 +11005365 !xfs_sb_version_hasextflgbit(&mp->m_sb)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005366 /*
5367 * Can't make it unwritten. There isn't
5368 * a full extent here so just skip it.
5369 */
Christoph Hellwig8280f6e2017-10-17 14:16:21 -07005370 ASSERT(end >= del.br_blockcount);
5371 end -= del.br_blockcount;
Christoph Hellwigb2b17122017-11-03 10:34:43 -07005372 if (got.br_startoff > end &&
5373 !xfs_iext_prev_extent(ifp, &icur, &got)) {
5374 done = true;
5375 break;
5376 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005377 continue;
5378 } else if (del.br_state == XFS_EXT_UNWRITTEN) {
Christoph Hellwig7efc7942016-11-24 11:39:44 +11005379 struct xfs_bmbt_irec prev;
5380
Linus Torvalds1da177e2005-04-16 15:20:36 -07005381 /*
5382 * This one is already unwritten.
5383 * It must have a written left neighbor.
5384 * Unwrite the killed part of that one and
5385 * try again.
5386 */
Christoph Hellwigb2b17122017-11-03 10:34:43 -07005387 if (!xfs_iext_prev_extent(ifp, &icur, &prev))
5388 ASSERT(0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005389 ASSERT(prev.br_state == XFS_EXT_NORM);
Eric Sandeen9d87c312009-01-14 23:22:07 -06005390 ASSERT(!isnullstartblock(prev.br_startblock));
Linus Torvalds1da177e2005-04-16 15:20:36 -07005391 ASSERT(del.br_startblock ==
5392 prev.br_startblock + prev.br_blockcount);
5393 if (prev.br_startoff < start) {
5394 mod = start - prev.br_startoff;
5395 prev.br_blockcount -= mod;
5396 prev.br_startblock += mod;
5397 prev.br_startoff = start;
5398 }
5399 prev.br_state = XFS_EXT_UNWRITTEN;
Christoph Hellwiga5bd606b2011-09-18 20:40:54 +00005400 error = xfs_bmap_add_extent_unwritten_real(tp,
Christoph Hellwigb2b17122017-11-03 10:34:43 -07005401 ip, whichfork, &icur, &cur,
Darrick J. Wong05a630d2017-02-02 15:14:01 -08005402 &prev, firstblock, dfops,
5403 &logflags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005404 if (error)
5405 goto error0;
5406 goto nodelete;
5407 } else {
5408 ASSERT(del.br_state == XFS_EXT_NORM);
5409 del.br_state = XFS_EXT_UNWRITTEN;
Christoph Hellwiga5bd606b2011-09-18 20:40:54 +00005410 error = xfs_bmap_add_extent_unwritten_real(tp,
Christoph Hellwigb2b17122017-11-03 10:34:43 -07005411 ip, whichfork, &icur, &cur,
Darrick J. Wong05a630d2017-02-02 15:14:01 -08005412 &del, firstblock, dfops,
5413 &logflags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005414 if (error)
5415 goto error0;
5416 goto nodelete;
5417 }
5418 }
Nathan Scott06d10dd2005-06-21 15:48:47 +10005419
Brian Fosterb2706a02016-03-15 11:42:46 +11005420 if (wasdel) {
Christoph Hellwigb2b17122017-11-03 10:34:43 -07005421 error = xfs_bmap_del_extent_delay(ip, whichfork, &icur,
Christoph Hellwige1d75532017-10-17 14:16:21 -07005422 &got, &del);
5423 } else {
Christoph Hellwigb2b17122017-11-03 10:34:43 -07005424 error = xfs_bmap_del_extent_real(ip, tp, &icur, dfops,
Christoph Hellwige1d75532017-10-17 14:16:21 -07005425 cur, &del, &tmp_logflags, whichfork,
5426 flags);
5427 logflags |= tmp_logflags;
Christoph Hellwigb213d692017-10-17 14:16:20 -07005428 }
Brian Fosterb2706a02016-03-15 11:42:46 +11005429
Linus Torvalds1da177e2005-04-16 15:20:36 -07005430 if (error)
5431 goto error0;
Brian Fosterb2706a02016-03-15 11:42:46 +11005432
Darrick J. Wonge1a4e372017-06-14 21:25:57 -07005433 max_len -= del.br_blockcount;
Christoph Hellwig8280f6e2017-10-17 14:16:21 -07005434 end = del.br_startoff - 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005435nodelete:
Linus Torvalds1da177e2005-04-16 15:20:36 -07005436 /*
5437 * If not done go on to the next (previous) record.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005438 */
Christoph Hellwig8280f6e2017-10-17 14:16:21 -07005439 if (end != (xfs_fileoff_t)-1 && end >= start) {
Christoph Hellwigb2b17122017-11-03 10:34:43 -07005440 if (!xfs_iext_get_extent(ifp, &icur, &got) ||
5441 (got.br_startoff > end &&
5442 !xfs_iext_prev_extent(ifp, &icur, &got))) {
5443 done = true;
5444 break;
Christoph Hellwig00239ac2011-05-11 15:04:08 +00005445 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005446 extno++;
5447 }
5448 }
Christoph Hellwigb2b17122017-11-03 10:34:43 -07005449 if (done || end == (xfs_fileoff_t)-1 || end < start)
Darrick J. Wong44535932016-10-03 09:11:29 -07005450 *rlen = 0;
5451 else
Christoph Hellwig8280f6e2017-10-17 14:16:21 -07005452 *rlen = end - start + 1;
Christoph Hellwig8096b1e2011-12-18 20:00:07 +00005453
Linus Torvalds1da177e2005-04-16 15:20:36 -07005454 /*
5455 * Convert to a btree if necessary.
5456 */
Christoph Hellwig8096b1e2011-12-18 20:00:07 +00005457 if (xfs_bmap_needs_btree(ip, whichfork)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005458 ASSERT(cur == NULL);
Darrick J. Wong2c3234d2016-08-03 11:19:29 +10005459 error = xfs_bmap_extents_to_btree(tp, ip, firstblock, dfops,
Linus Torvalds1da177e2005-04-16 15:20:36 -07005460 &cur, 0, &tmp_logflags, whichfork);
5461 logflags |= tmp_logflags;
5462 if (error)
5463 goto error0;
5464 }
5465 /*
5466 * transform from btree to extents, give it cur
5467 */
Christoph Hellwig8096b1e2011-12-18 20:00:07 +00005468 else if (xfs_bmap_wants_extents(ip, whichfork)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005469 ASSERT(cur != NULL);
5470 error = xfs_bmap_btree_to_extents(tp, ip, cur, &tmp_logflags,
5471 whichfork);
5472 logflags |= tmp_logflags;
5473 if (error)
5474 goto error0;
5475 }
5476 /*
5477 * transform from extents to local?
5478 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07005479 error = 0;
5480error0:
5481 /*
5482 * Log everything. Do this after conversion, there's no point in
Mandy Kirkconnell4eea22f2006-03-14 13:29:52 +11005483 * logging the extent records if we've converted to btree format.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005484 */
Eric Sandeen9d87c312009-01-14 23:22:07 -06005485 if ((logflags & xfs_ilog_fext(whichfork)) &&
Linus Torvalds1da177e2005-04-16 15:20:36 -07005486 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
Eric Sandeen9d87c312009-01-14 23:22:07 -06005487 logflags &= ~xfs_ilog_fext(whichfork);
5488 else if ((logflags & xfs_ilog_fbroot(whichfork)) &&
Linus Torvalds1da177e2005-04-16 15:20:36 -07005489 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)
Eric Sandeen9d87c312009-01-14 23:22:07 -06005490 logflags &= ~xfs_ilog_fbroot(whichfork);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005491 /*
5492 * Log inode even in the error case, if the transaction
5493 * is dirty we'll need to shut down the filesystem.
5494 */
5495 if (logflags)
5496 xfs_trans_log_inode(tp, ip, logflags);
5497 if (cur) {
5498 if (!error) {
5499 *firstblock = cur->bc_private.b.firstblock;
5500 cur->bc_private.b.allocated = 0;
5501 }
5502 xfs_btree_del_cursor(cur,
5503 error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
5504 }
5505 return error;
5506}
Namjae Jeone1d8fb82014-02-24 10:58:19 +11005507
Darrick J. Wong44535932016-10-03 09:11:29 -07005508/* Unmap a range of a file. */
5509int
5510xfs_bunmapi(
5511 xfs_trans_t *tp,
5512 struct xfs_inode *ip,
5513 xfs_fileoff_t bno,
5514 xfs_filblks_t len,
5515 int flags,
5516 xfs_extnum_t nexts,
5517 xfs_fsblock_t *firstblock,
5518 struct xfs_defer_ops *dfops,
5519 int *done)
5520{
5521 int error;
5522
5523 error = __xfs_bunmapi(tp, ip, bno, &len, flags, nexts, firstblock,
5524 dfops);
5525 *done = (len == 0);
5526 return error;
5527}
5528
Namjae Jeone1d8fb82014-02-24 10:58:19 +11005529/*
Brian Fosterddb19e32014-09-23 15:38:09 +10005530 * Determine whether an extent shift can be accomplished by a merge with the
5531 * extent that precedes the target hole of the shift.
5532 */
5533STATIC bool
5534xfs_bmse_can_merge(
5535 struct xfs_bmbt_irec *left, /* preceding extent */
5536 struct xfs_bmbt_irec *got, /* current extent to shift */
5537 xfs_fileoff_t shift) /* shift fsb */
5538{
5539 xfs_fileoff_t startoff;
5540
5541 startoff = got->br_startoff - shift;
5542
5543 /*
5544 * The extent, once shifted, must be adjacent in-file and on-disk with
5545 * the preceding extent.
5546 */
5547 if ((left->br_startoff + left->br_blockcount != startoff) ||
5548 (left->br_startblock + left->br_blockcount != got->br_startblock) ||
5549 (left->br_state != got->br_state) ||
5550 (left->br_blockcount + got->br_blockcount > MAXEXTLEN))
5551 return false;
5552
5553 return true;
5554}
5555
/*
 * A bmap extent shift adjusts the file offset of an extent to fill a preceding
 * hole in the file. If an extent shift would result in the extent being fully
 * adjacent to the extent that currently precedes the hole, we can merge with
 * the preceding extent rather than do the shift.
 *
 * This function assumes the caller has verified a shift-by-merge is possible
 * with the provided extents via xfs_bmse_can_merge().
 *
 * On success, @left absorbs @got's blocks: the record for @got is deleted
 * from the bmap btree (when @cur is non-NULL) and from the in-core extent
 * list, @left's record is rewritten with the combined length, and the
 * reverse mappings are replaced to match.  Returns 0 or a negative errno.
 */
STATIC int
xfs_bmse_merge(
	struct xfs_inode		*ip,
	int				whichfork,
	xfs_fileoff_t			shift,		/* shift fsb */
	struct xfs_iext_cursor		*icur,		/* positioned at @got */
	struct xfs_bmbt_irec		*got,		/* extent to shift */
	struct xfs_bmbt_irec		*left,		/* preceding extent */
	struct xfs_btree_cur		*cur,		/* NULL if no bmbt */
	int				*logflags,	/* output */
	struct xfs_defer_ops		*dfops)
{
	struct xfs_bmbt_irec		new;
	xfs_filblks_t			blockcount;
	int				error, i;
	struct xfs_mount		*mp = ip->i_mount;

	blockcount = left->br_blockcount + got->br_blockcount;

	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	ASSERT(xfs_bmse_can_merge(left, got, shift));

	/* The merged extent is @left widened to also cover @got's blocks. */
	new = *left;
	new.br_blockcount = blockcount;

	/*
	 * Update the on-disk extent count, the btree if necessary and log the
	 * inode.
	 */
	XFS_IFORK_NEXT_SET(ip, whichfork,
			   XFS_IFORK_NEXTENTS(ip, whichfork) - 1);
	*logflags |= XFS_ILOG_CORE;
	if (!cur) {
		/* No btree: flag the in-core extents for logging instead. */
		*logflags |= XFS_ILOG_DEXT;
		goto done;
	}

	/* lookup and remove the extent to merge */
	error = xfs_bmbt_lookup_eq(cur, got, &i);
	if (error)
		return error;
	XFS_WANT_CORRUPTED_RETURN(mp, i == 1);

	error = xfs_btree_delete(cur, &i);
	if (error)
		return error;
	XFS_WANT_CORRUPTED_RETURN(mp, i == 1);

	/* lookup and update size of the previous extent */
	error = xfs_bmbt_lookup_eq(cur, left, &i);
	if (error)
		return error;
	XFS_WANT_CORRUPTED_RETURN(mp, i == 1);

	error = xfs_bmbt_update(cur, &new);
	if (error)
		return error;

done:
	/* Drop @got from the in-core list and rewrite @left as the merge. */
	xfs_iext_remove(ip, icur, 0);
	xfs_iext_prev(XFS_IFORK_PTR(ip, whichfork), icur);
	xfs_iext_update_extent(ip, xfs_bmap_fork_to_state(whichfork), icur,
			&new);

	/* update reverse mapping. rmap functions merge the rmaps for us */
	error = xfs_rmap_unmap_extent(mp, dfops, ip, whichfork, got);
	if (error)
		return error;
	memcpy(&new, got, sizeof(new));
	new.br_startoff = left->br_startoff + left->br_blockcount;
	return xfs_rmap_map_extent(mp, dfops, ip, whichfork, &new);
}
5638
/*
 * Move an extent record to a new file offset.
 *
 * @got is modified in place so that it starts at @startoff; its block count
 * and start block are untouched.  The change is written to the bmap btree
 * when @cur is non-NULL (otherwise the in-core extents are flagged for
 * logging via XFS_ILOG_DEXT), the in-core extent list is updated, and the
 * reverse mapping for the old offset is replaced by one for the new offset.
 *
 * Returns 0 or a negative errno.
 */
static int
xfs_bmap_shift_update_extent(
	struct xfs_inode	*ip,
	int			whichfork,
	struct xfs_iext_cursor	*icur,
	struct xfs_bmbt_irec	*got,		/* extent to move; updated */
	struct xfs_btree_cur	*cur,		/* may be NULL */
	int			*logflags,	/* output: inode log flags */
	struct xfs_defer_ops	*dfops,
	xfs_fileoff_t		startoff)	/* new file offset for @got */
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_bmbt_irec	prev = *got;	/* pre-move copy for lookups */
	int			error, i;

	*logflags |= XFS_ILOG_CORE;

	got->br_startoff = startoff;

	if (cur) {
		/* Find the old record in the btree, rewrite it in place. */
		error = xfs_bmbt_lookup_eq(cur, &prev, &i);
		if (error)
			return error;
		XFS_WANT_CORRUPTED_RETURN(mp, i == 1);

		error = xfs_bmbt_update(cur, got);
		if (error)
			return error;
	} else {
		*logflags |= XFS_ILOG_DEXT;
	}

	xfs_iext_update_extent(ip, xfs_bmap_fork_to_state(whichfork), icur,
			got);

	/* update reverse mapping */
	error = xfs_rmap_unmap_extent(mp, dfops, ip, whichfork, &prev);
	if (error)
		return error;
	return xfs_rmap_map_extent(mp, dfops, ip, whichfork, got);
}
5680
/*
 * Shift the data-fork extent at *next_fsb towards the start of the file by
 * offset_shift_fsb blocks, merging it into the preceding extent when the
 * shift makes the two contiguous (see xfs_bmse_can_merge()).
 *
 * On success *next_fsb is advanced to the start of the next extent to
 * process, and *done is set when no extent follows.  Returns 0, -EINVAL if
 * the shift would overlap the preceding extent (or shift past the start of
 * the file), -EFSCORRUPTED/-EIO on corruption or shutdown, or another
 * negative errno.
 */
int
xfs_bmap_collapse_extents(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	xfs_fileoff_t		*next_fsb,
	xfs_fileoff_t		offset_shift_fsb,
	bool			*done,
	xfs_fsblock_t		*firstblock,
	struct xfs_defer_ops	*dfops)
{
	int			whichfork = XFS_DATA_FORK;
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
	struct xfs_btree_cur	*cur = NULL;
	struct xfs_bmbt_irec	got, prev;
	struct xfs_iext_cursor	icur;
	xfs_fileoff_t		new_startoff;
	int			error = 0;
	int			logflags = 0;

	/* The fork must be in extents or btree format. */
	if (unlikely(XFS_TEST_ERROR(
	    (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
	     XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE),
	     mp, XFS_ERRTAG_BMAPIFORMAT))) {
		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp);
		return -EFSCORRUPTED;
	}

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL));

	/* Pull the extent list into memory if it isn't there already. */
	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
		error = xfs_iread_extents(tp, ip, whichfork);
		if (error)
			return error;
	}

	if (ifp->if_flags & XFS_IFBROOT) {
		cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
		cur->bc_private.b.firstblock = *firstblock;
		cur->bc_private.b.dfops = dfops;
		cur->bc_private.b.flags = 0;
	}

	/* No extent at or after *next_fsb means we're finished. */
	if (!xfs_iext_lookup_extent(ip, ifp, *next_fsb, &icur, &got)) {
		*done = true;
		goto del_cursor;
	}
	/* Delalloc extents must not be shifted. */
	XFS_WANT_CORRUPTED_GOTO(mp, !isnullstartblock(got.br_startblock),
				del_cursor);

	new_startoff = got.br_startoff - offset_shift_fsb;
	if (xfs_iext_peek_prev_extent(ifp, &icur, &prev)) {
		/* The shifted extent must not collide with its left neighbor. */
		if (new_startoff < prev.br_startoff + prev.br_blockcount) {
			error = -EINVAL;
			goto del_cursor;
		}

		/* Merge instead of shifting if the two become contiguous. */
		if (xfs_bmse_can_merge(&prev, &got, offset_shift_fsb)) {
			error = xfs_bmse_merge(ip, whichfork, offset_shift_fsb,
					&icur, &got, &prev, cur, &logflags,
					dfops);
			if (error)
				goto del_cursor;
			goto done;
		}
	} else {
		/* No left neighbor: the shift must not run off the file start. */
		if (got.br_startoff < offset_shift_fsb) {
			error = -EINVAL;
			goto del_cursor;
		}
	}

	error = xfs_bmap_shift_update_extent(ip, whichfork, &icur, &got, cur,
			&logflags, dfops, new_startoff);
	if (error)
		goto del_cursor;

done:
	if (!xfs_iext_next_extent(ifp, &icur, &got)) {
		*done = true;
		goto del_cursor;
	}

	*next_fsb = got.br_startoff;
del_cursor:
	if (cur)
		xfs_btree_del_cursor(cur,
			error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
	if (logflags)
		xfs_trans_log_inode(tp, ip, logflags);
	return error;
}
5776
/*
 * Shift the data-fork extent at *next_fsb (or the last extent of the file
 * when *next_fsb == NULLFSBLOCK) towards the end of the file by
 * offset_shift_fsb blocks, walking right-to-left and stopping at stop_fsb.
 * Unlike xfs_bmap_collapse_extents(), shifted extents are never merged with
 * their neighbors.
 *
 * On success *next_fsb is moved to the start of the previous extent to
 * process, and *done is set once the walk reaches stop_fsb.  Returns 0,
 * -EINVAL if the shifted extent would collide with its right neighbor,
 * -EIO if the found extent does not extend past stop_fsb (or on shutdown),
 * -EFSCORRUPTED on a bad fork format, or another negative errno.
 */
int
xfs_bmap_insert_extents(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	xfs_fileoff_t		*next_fsb,
	xfs_fileoff_t		offset_shift_fsb,
	bool			*done,
	xfs_fileoff_t		stop_fsb,
	xfs_fsblock_t		*firstblock,
	struct xfs_defer_ops	*dfops)
{
	int			whichfork = XFS_DATA_FORK;
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
	struct xfs_btree_cur	*cur = NULL;
	struct xfs_bmbt_irec	got, next;
	struct xfs_iext_cursor	icur;
	xfs_fileoff_t		new_startoff;
	int			error = 0;
	int			logflags = 0;

	/* The fork must be in extents or btree format. */
	if (unlikely(XFS_TEST_ERROR(
	    (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
	     XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE),
	     mp, XFS_ERRTAG_BMAPIFORMAT))) {
		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp);
		return -EFSCORRUPTED;
	}

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL));

	/* Pull the extent list into memory if it isn't there already. */
	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
		error = xfs_iread_extents(tp, ip, whichfork);
		if (error)
			return error;
	}

	if (ifp->if_flags & XFS_IFBROOT) {
		cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
		cur->bc_private.b.firstblock = *firstblock;
		cur->bc_private.b.dfops = dfops;
		cur->bc_private.b.flags = 0;
	}

	if (*next_fsb == NULLFSBLOCK) {
		/* First call: start from the last extent in the file. */
		xfs_iext_last(ifp, &icur);
		if (!xfs_iext_get_extent(ifp, &icur, &got) ||
		    stop_fsb > got.br_startoff) {
			*done = true;
			goto del_cursor;
		}
	} else {
		if (!xfs_iext_lookup_extent(ip, ifp, *next_fsb, &icur, &got)) {
			*done = true;
			goto del_cursor;
		}
	}
	/* Delalloc extents must not be shifted. */
	XFS_WANT_CORRUPTED_GOTO(mp, !isnullstartblock(got.br_startblock),
				del_cursor);

	if (stop_fsb >= got.br_startoff + got.br_blockcount) {
		error = -EIO;
		goto del_cursor;
	}

	new_startoff = got.br_startoff + offset_shift_fsb;
	if (xfs_iext_peek_next_extent(ifp, &icur, &next)) {
		/* The shifted extent must not collide with its right neighbor. */
		if (new_startoff + got.br_blockcount > next.br_startoff) {
			error = -EINVAL;
			goto del_cursor;
		}

		/*
		 * Unlike a left shift (which involves a hole punch), a right
		 * shift does not modify extent neighbors in any way. We should
		 * never find mergeable extents in this scenario. Check anyways
		 * and warn if we encounter two extents that could be one.
		 */
		if (xfs_bmse_can_merge(&got, &next, offset_shift_fsb))
			WARN_ON_ONCE(1);
	}

	error = xfs_bmap_shift_update_extent(ip, whichfork, &icur, &got, cur,
			&logflags, dfops, new_startoff);
	if (error)
		goto del_cursor;

	/* Step to the previous extent; stop once we reach stop_fsb. */
	if (!xfs_iext_prev_extent(ifp, &icur, &got) ||
	    stop_fsb >= got.br_startoff + got.br_blockcount) {
		*done = true;
		goto del_cursor;
	}

	*next_fsb = got.br_startoff;
del_cursor:
	if (cur)
		xfs_btree_del_cursor(cur,
			error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
	if (logflags)
		xfs_trans_log_inode(tp, ip, logflags);
	return error;
}
Namjae Jeona904b1c2015-03-25 15:08:56 +11005882
/*
 * Splits an extent into two extents at split_fsb block such that split_fsb
 * becomes the first block of the new second extent.  @ext is the target
 * extent to be split.  @split_fsb is the block where the extent is split.
 * If split_fsb lies in a hole or at the first block of an extent, just
 * return 0.
 */
5889STATIC int
5890xfs_bmap_split_extent_at(
5891 struct xfs_trans *tp,
5892 struct xfs_inode *ip,
5893 xfs_fileoff_t split_fsb,
5894 xfs_fsblock_t *firstfsb,
Darrick J. Wong2c3234d2016-08-03 11:19:29 +10005895 struct xfs_defer_ops *dfops)
Namjae Jeona904b1c2015-03-25 15:08:56 +11005896{
5897 int whichfork = XFS_DATA_FORK;
5898 struct xfs_btree_cur *cur = NULL;
Namjae Jeona904b1c2015-03-25 15:08:56 +11005899 struct xfs_bmbt_irec got;
5900 struct xfs_bmbt_irec new; /* split extent */
5901 struct xfs_mount *mp = ip->i_mount;
5902 struct xfs_ifork *ifp;
5903 xfs_fsblock_t gotblkcnt; /* new block count for got */
Christoph Hellwigb2b17122017-11-03 10:34:43 -07005904 struct xfs_iext_cursor icur;
Namjae Jeona904b1c2015-03-25 15:08:56 +11005905 int error = 0;
5906 int logflags = 0;
5907 int i = 0;
5908
5909 if (unlikely(XFS_TEST_ERROR(
5910 (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
5911 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE),
Darrick J. Wong9e24cfd2017-06-20 17:54:47 -07005912 mp, XFS_ERRTAG_BMAPIFORMAT))) {
Namjae Jeona904b1c2015-03-25 15:08:56 +11005913 XFS_ERROR_REPORT("xfs_bmap_split_extent_at",
5914 XFS_ERRLEVEL_LOW, mp);
5915 return -EFSCORRUPTED;
5916 }
5917
5918 if (XFS_FORCED_SHUTDOWN(mp))
5919 return -EIO;
5920
5921 ifp = XFS_IFORK_PTR(ip, whichfork);
5922 if (!(ifp->if_flags & XFS_IFEXTENTS)) {
5923 /* Read in all the extents */
5924 error = xfs_iread_extents(tp, ip, whichfork);
5925 if (error)
5926 return error;
5927 }
5928
5929 /*
Christoph Hellwig4c35445b2017-08-29 15:44:13 -07005930 * If there are not extents, or split_fsb lies in a hole we are done.
Namjae Jeona904b1c2015-03-25 15:08:56 +11005931 */
Christoph Hellwigb2b17122017-11-03 10:34:43 -07005932 if (!xfs_iext_lookup_extent(ip, ifp, split_fsb, &icur, &got) ||
Christoph Hellwig4c35445b2017-08-29 15:44:13 -07005933 got.br_startoff >= split_fsb)
Namjae Jeona904b1c2015-03-25 15:08:56 +11005934 return 0;
5935
5936 gotblkcnt = split_fsb - got.br_startoff;
5937 new.br_startoff = split_fsb;
5938 new.br_startblock = got.br_startblock + gotblkcnt;
5939 new.br_blockcount = got.br_blockcount - gotblkcnt;
5940 new.br_state = got.br_state;
5941
5942 if (ifp->if_flags & XFS_IFBROOT) {
5943 cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
5944 cur->bc_private.b.firstblock = *firstfsb;
Darrick J. Wong2c3234d2016-08-03 11:19:29 +10005945 cur->bc_private.b.dfops = dfops;
Namjae Jeona904b1c2015-03-25 15:08:56 +11005946 cur->bc_private.b.flags = 0;
Christoph Hellwige16cf9b2017-10-17 14:16:26 -07005947 error = xfs_bmbt_lookup_eq(cur, &got, &i);
Namjae Jeona904b1c2015-03-25 15:08:56 +11005948 if (error)
5949 goto del_cursor;
5950 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, del_cursor);
5951 }
5952
Namjae Jeona904b1c2015-03-25 15:08:56 +11005953 got.br_blockcount = gotblkcnt;
Christoph Hellwigb2b17122017-11-03 10:34:43 -07005954 xfs_iext_update_extent(ip, xfs_bmap_fork_to_state(whichfork), &icur,
5955 &got);
Namjae Jeona904b1c2015-03-25 15:08:56 +11005956
5957 logflags = XFS_ILOG_CORE;
5958 if (cur) {
Christoph Hellwiga67d00a2017-10-17 14:16:26 -07005959 error = xfs_bmbt_update(cur, &got);
Namjae Jeona904b1c2015-03-25 15:08:56 +11005960 if (error)
5961 goto del_cursor;
5962 } else
5963 logflags |= XFS_ILOG_DEXT;
5964
5965 /* Add new extent */
Christoph Hellwigb2b17122017-11-03 10:34:43 -07005966 xfs_iext_next(ifp, &icur);
Christoph Hellwig0254c2f2017-11-03 10:34:46 -07005967 xfs_iext_insert(ip, &icur, &new, 0);
Namjae Jeona904b1c2015-03-25 15:08:56 +11005968 XFS_IFORK_NEXT_SET(ip, whichfork,
5969 XFS_IFORK_NEXTENTS(ip, whichfork) + 1);
5970
5971 if (cur) {
Christoph Hellwige16cf9b2017-10-17 14:16:26 -07005972 error = xfs_bmbt_lookup_eq(cur, &new, &i);
Namjae Jeona904b1c2015-03-25 15:08:56 +11005973 if (error)
5974 goto del_cursor;
5975 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, del_cursor);
Namjae Jeona904b1c2015-03-25 15:08:56 +11005976 error = xfs_btree_insert(cur, &i);
5977 if (error)
5978 goto del_cursor;
5979 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, del_cursor);
5980 }
5981
5982 /*
5983 * Convert to a btree if necessary.
5984 */
5985 if (xfs_bmap_needs_btree(ip, whichfork)) {
5986 int tmp_logflags; /* partial log flag return val */
5987
5988 ASSERT(cur == NULL);
Darrick J. Wong2c3234d2016-08-03 11:19:29 +10005989 error = xfs_bmap_extents_to_btree(tp, ip, firstfsb, dfops,
Namjae Jeona904b1c2015-03-25 15:08:56 +11005990 &cur, 0, &tmp_logflags, whichfork);
5991 logflags |= tmp_logflags;
5992 }
5993
5994del_cursor:
5995 if (cur) {
5996 cur->bc_private.b.allocated = 0;
5997 xfs_btree_del_cursor(cur,
5998 error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
5999 }
6000
6001 if (logflags)
6002 xfs_trans_log_inode(tp, ip, logflags);
6003 return error;
6004}
6005
/*
 * Split the data fork extent covering file offset @split_fsb into two
 * extents at that offset.  This wrapper owns the whole update: it
 * allocates and commits the transaction, takes the inode lock, and
 * runs/cancels the deferred-ops list around xfs_bmap_split_extent_at().
 *
 * Returns 0 on success or a negative errno.
 */
int
xfs_bmap_split_extent(
	struct xfs_inode        *ip,
	xfs_fileoff_t           split_fsb)
{
	struct xfs_mount        *mp = ip->i_mount;
	struct xfs_trans        *tp;
	struct xfs_defer_ops    dfops;		/* deferred work queued by the split */
	xfs_fsblock_t           firstfsb;	/* first-block hint for allocations */
	int                     error;

	/* Reserve log space and blocks (btree conversion may allocate). */
	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write,
			XFS_DIOSTRAT_SPACE_RES(mp, 0), 0, 0, &tp);
	if (error)
		return error;

	/*
	 * Join the inode with the ILOCK flag so the transaction drops the
	 * lock at commit/cancel time — note there is no explicit
	 * xfs_iunlock() anywhere below.
	 */
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);

	xfs_defer_init(&dfops, &firstfsb);

	error = xfs_bmap_split_extent_at(tp, ip, split_fsb,
			&firstfsb, &dfops);
	if (error)
		goto out;

	/* Finish any work the split queued (e.g. extents-to-btree). */
	error = xfs_defer_finish(&tp, &dfops);
	if (error)
		goto out;

	return xfs_trans_commit(tp);

out:
	/* Unwind in reverse: drop queued deferred work, then the trans. */
	xfs_defer_cancel(&dfops);
	xfs_trans_cancel(tp);
	return error;
}
Darrick J. Wong9f3afb52016-10-03 09:11:28 -07006043
6044/* Deferred mapping is only for real extents in the data fork. */
6045static bool
6046xfs_bmap_is_update_needed(
6047 struct xfs_bmbt_irec *bmap)
6048{
6049 return bmap->br_startblock != HOLESTARTBLOCK &&
6050 bmap->br_startblock != DELAYSTARTBLOCK;
6051}
6052
6053/* Record a bmap intent. */
6054static int
6055__xfs_bmap_add(
6056 struct xfs_mount *mp,
6057 struct xfs_defer_ops *dfops,
6058 enum xfs_bmap_intent_type type,
6059 struct xfs_inode *ip,
6060 int whichfork,
6061 struct xfs_bmbt_irec *bmap)
6062{
6063 int error;
6064 struct xfs_bmap_intent *bi;
6065
6066 trace_xfs_bmap_defer(mp,
6067 XFS_FSB_TO_AGNO(mp, bmap->br_startblock),
6068 type,
6069 XFS_FSB_TO_AGBNO(mp, bmap->br_startblock),
6070 ip->i_ino, whichfork,
6071 bmap->br_startoff,
6072 bmap->br_blockcount,
6073 bmap->br_state);
6074
6075 bi = kmem_alloc(sizeof(struct xfs_bmap_intent), KM_SLEEP | KM_NOFS);
6076 INIT_LIST_HEAD(&bi->bi_list);
6077 bi->bi_type = type;
6078 bi->bi_owner = ip;
6079 bi->bi_whichfork = whichfork;
6080 bi->bi_bmap = *bmap;
6081
Christoph Hellwig882d8782017-08-28 10:21:03 -07006082 error = xfs_defer_ijoin(dfops, bi->bi_owner);
Darrick J. Wong9f3afb52016-10-03 09:11:28 -07006083 if (error) {
6084 kmem_free(bi);
6085 return error;
6086 }
6087
6088 xfs_defer_add(dfops, XFS_DEFER_OPS_TYPE_BMAP, &bi->bi_list);
6089 return 0;
6090}
6091
6092/* Map an extent into a file. */
6093int
6094xfs_bmap_map_extent(
6095 struct xfs_mount *mp,
6096 struct xfs_defer_ops *dfops,
6097 struct xfs_inode *ip,
6098 struct xfs_bmbt_irec *PREV)
6099{
6100 if (!xfs_bmap_is_update_needed(PREV))
6101 return 0;
6102
6103 return __xfs_bmap_add(mp, dfops, XFS_BMAP_MAP, ip,
6104 XFS_DATA_FORK, PREV);
6105}
6106
6107/* Unmap an extent out of a file. */
6108int
6109xfs_bmap_unmap_extent(
6110 struct xfs_mount *mp,
6111 struct xfs_defer_ops *dfops,
6112 struct xfs_inode *ip,
6113 struct xfs_bmbt_irec *PREV)
6114{
6115 if (!xfs_bmap_is_update_needed(PREV))
6116 return 0;
6117
6118 return __xfs_bmap_add(mp, dfops, XFS_BMAP_UNMAP, ip,
6119 XFS_DATA_FORK, PREV);
6120}
6121
6122/*
6123 * Process one of the deferred bmap operations. We pass back the
6124 * btree cursor to maintain our lock on the bmapbt between calls.
6125 */
6126int
6127xfs_bmap_finish_one(
6128 struct xfs_trans *tp,
6129 struct xfs_defer_ops *dfops,
6130 struct xfs_inode *ip,
6131 enum xfs_bmap_intent_type type,
6132 int whichfork,
6133 xfs_fileoff_t startoff,
6134 xfs_fsblock_t startblock,
Darrick J. Wonge1a4e372017-06-14 21:25:57 -07006135 xfs_filblks_t *blockcount,
Darrick J. Wong9f3afb52016-10-03 09:11:28 -07006136 xfs_exntst_t state)
6137{
Darrick J. Wonge1a4e372017-06-14 21:25:57 -07006138 xfs_fsblock_t firstfsb;
6139 int error = 0;
Darrick J. Wong9f3afb52016-10-03 09:11:28 -07006140
Darrick J. Wong4c1a67b2017-07-17 14:30:51 -07006141 /*
6142 * firstfsb is tied to the transaction lifetime and is used to
6143 * ensure correct AG locking order and schedule work item
6144 * continuations. XFS_BUI_MAX_FAST_EXTENTS (== 1) restricts us
6145 * to only making one bmap call per transaction, so it should
6146 * be safe to have it as a local variable here.
6147 */
6148 firstfsb = NULLFSBLOCK;
6149
Darrick J. Wong9f3afb52016-10-03 09:11:28 -07006150 trace_xfs_bmap_deferred(tp->t_mountp,
6151 XFS_FSB_TO_AGNO(tp->t_mountp, startblock), type,
6152 XFS_FSB_TO_AGBNO(tp->t_mountp, startblock),
Darrick J. Wonge1a4e372017-06-14 21:25:57 -07006153 ip->i_ino, whichfork, startoff, *blockcount, state);
Darrick J. Wong9f3afb52016-10-03 09:11:28 -07006154
Christoph Hellwig39e07da2017-04-11 16:45:53 -07006155 if (WARN_ON_ONCE(whichfork != XFS_DATA_FORK))
Darrick J. Wong9f3afb52016-10-03 09:11:28 -07006156 return -EFSCORRUPTED;
Darrick J. Wong9f3afb52016-10-03 09:11:28 -07006157
6158 if (XFS_TEST_ERROR(false, tp->t_mountp,
Darrick J. Wong9e24cfd2017-06-20 17:54:47 -07006159 XFS_ERRTAG_BMAP_FINISH_ONE))
Darrick J. Wong9f3afb52016-10-03 09:11:28 -07006160 return -EIO;
6161
6162 switch (type) {
6163 case XFS_BMAP_MAP:
Darrick J. Wonge1a4e372017-06-14 21:25:57 -07006164 error = xfs_bmapi_remap(tp, ip, startoff, *blockcount,
Christoph Hellwig6ebd5a42017-04-11 16:45:55 -07006165 startblock, dfops);
Darrick J. Wonge1a4e372017-06-14 21:25:57 -07006166 *blockcount = 0;
Darrick J. Wong9f3afb52016-10-03 09:11:28 -07006167 break;
6168 case XFS_BMAP_UNMAP:
Darrick J. Wonge1a4e372017-06-14 21:25:57 -07006169 error = __xfs_bunmapi(tp, ip, startoff, blockcount,
6170 XFS_BMAPI_REMAP, 1, &firstfsb, dfops);
Darrick J. Wong9f3afb52016-10-03 09:11:28 -07006171 break;
6172 default:
6173 ASSERT(0);
6174 error = -EFSCORRUPTED;
6175 }
6176
6177 return error;
6178}
Darrick J. Wong30b09842018-03-23 10:06:52 -07006179
6180/* Check that an inode's extent does not have invalid flags or bad ranges. */
6181xfs_failaddr_t
6182xfs_bmap_validate_extent(
6183 struct xfs_inode *ip,
6184 int whichfork,
6185 struct xfs_bmbt_irec *irec)
6186{
6187 struct xfs_mount *mp = ip->i_mount;
6188 xfs_fsblock_t endfsb;
6189 bool isrt;
6190
6191 isrt = XFS_IS_REALTIME_INODE(ip);
6192 endfsb = irec->br_startblock + irec->br_blockcount - 1;
6193 if (isrt) {
6194 if (!xfs_verify_rtbno(mp, irec->br_startblock))
6195 return __this_address;
6196 if (!xfs_verify_rtbno(mp, endfsb))
6197 return __this_address;
6198 } else {
6199 if (!xfs_verify_fsbno(mp, irec->br_startblock))
6200 return __this_address;
6201 if (!xfs_verify_fsbno(mp, endfsb))
6202 return __this_address;
6203 if (XFS_FSB_TO_AGNO(mp, irec->br_startblock) !=
6204 XFS_FSB_TO_AGNO(mp, endfsb))
6205 return __this_address;
6206 }
6207 if (irec->br_state != XFS_EXT_NORM) {
6208 if (whichfork != XFS_DATA_FORK)
6209 return __this_address;
6210 if (!xfs_sb_version_hasextflgbit(&mp->m_sb))
6211 return __this_address;
6212 }
6213 return NULL;
6214}