/*
 * Copyright (c) 2000-2001,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_btree_trace.h"
#include "xfs_ialloc.h"
#include "xfs_alloc.h"
#include "xfs_error.h"

/*
 * Prototypes for internal functions.
 */

STATIC void xfs_alloc_log_block(xfs_trans_t *, xfs_buf_t *, int);
STATIC void xfs_alloc_log_keys(xfs_btree_cur_t *, xfs_buf_t *, int, int);
STATIC void xfs_alloc_log_ptrs(xfs_btree_cur_t *, xfs_buf_t *, int, int);
STATIC void xfs_alloc_log_recs(xfs_btree_cur_t *, xfs_buf_t *, int, int);
STATIC int xfs_alloc_newroot(xfs_btree_cur_t *, int *);

/*
 * Internal functions.
 */

/*
 * Single level of the xfs_alloc_delete record deletion routine.
 * Delete record pointed to by cur/level.
 * Remove the record from its block then rebalance the tree.
 * Return 0 for error, 1 for done, 2 to go on to the next level.
 */
STATIC int				/* error */
xfs_alloc_delrec(
	xfs_btree_cur_t		*cur,	/* btree cursor */
	int			level,	/* level removing record from */
	int			*stat)	/* fail/done/go-on */
{
	xfs_agf_t		*agf;	/* allocation group freelist header */
	xfs_alloc_block_t	*block;	/* btree block record/key lives in */
	xfs_agblock_t		bno;	/* btree block number */
	xfs_buf_t		*bp;	/* buffer for block */
	int			error;	/* error return value */
	int			i;	/* loop index */
	xfs_alloc_key_t		key;	/* kp points here if block is level 0 */
	xfs_agblock_t		lbno;	/* left block's block number */
	xfs_buf_t		*lbp;	/* left block's buffer pointer */
	xfs_alloc_block_t	*left;	/* left btree block */
	xfs_alloc_key_t		*lkp = NULL;	/* left block key pointer */
	xfs_alloc_ptr_t		*lpp = NULL;	/* left block address pointer */
	int			lrecs = 0;	/* number of records in left block */
	xfs_alloc_rec_t		*lrp;	/* left block record pointer */
	xfs_mount_t		*mp;	/* mount structure */
	int			ptr;	/* index in btree block for this rec */
	xfs_agblock_t		rbno;	/* right block's block number */
	xfs_buf_t		*rbp;	/* right block's buffer pointer */
	xfs_alloc_block_t	*right;	/* right btree block */
	xfs_alloc_key_t		*rkp;	/* right block key pointer */
	xfs_alloc_ptr_t		*rpp;	/* right block address pointer */
	int			rrecs = 0;	/* number of records in right block */
	int			numrecs;
	xfs_alloc_rec_t		*rrp;	/* right block record pointer */
	xfs_btree_cur_t		*tcur;	/* temporary btree cursor */

	/*
	 * Get the index of the entry being deleted, check for nothing there.
	 */
	ptr = cur->bc_ptrs[level];
	if (ptr == 0) {
		*stat = 0;
		return 0;
	}
	/*
	 * Get the buffer & block containing the record or key/ptr.
	 */
	bp = cur->bc_bufs[level];
	block = XFS_BUF_TO_ALLOC_BLOCK(bp);
#ifdef DEBUG
	if ((error = xfs_btree_check_sblock(cur, block, level, bp)))
		return error;
#endif
	/*
	 * Fail if we're off the end of the block.
	 */
	numrecs = be16_to_cpu(block->bb_numrecs);
	if (ptr > numrecs) {
		*stat = 0;
		return 0;
	}
	XFS_STATS_INC(xs_abt_delrec);
	/*
	 * It's a nonleaf.  Excise the key and ptr being deleted, by
	 * sliding the entries past them down one.
	 * Log the changed areas of the block.
	 */
	if (level > 0) {
		lkp = XFS_ALLOC_KEY_ADDR(block, 1, cur);
		lpp = XFS_ALLOC_PTR_ADDR(block, 1, cur);
#ifdef DEBUG
		for (i = ptr; i < numrecs; i++) {
			if ((error = xfs_btree_check_sptr(cur, be32_to_cpu(lpp[i]), level)))
				return error;
		}
#endif
		if (ptr < numrecs) {
			memmove(&lkp[ptr - 1], &lkp[ptr],
				(numrecs - ptr) * sizeof(*lkp));
			memmove(&lpp[ptr - 1], &lpp[ptr],
				(numrecs - ptr) * sizeof(*lpp));
			xfs_alloc_log_ptrs(cur, bp, ptr, numrecs - 1);
			xfs_alloc_log_keys(cur, bp, ptr, numrecs - 1);
		}
	}
	/*
	 * It's a leaf.  Excise the record being deleted, by sliding the
	 * entries past it down one.  Log the changed areas of the block.
	 */
	else {
		lrp = XFS_ALLOC_REC_ADDR(block, 1, cur);
		if (ptr < numrecs) {
			memmove(&lrp[ptr - 1], &lrp[ptr],
				(numrecs - ptr) * sizeof(*lrp));
			xfs_alloc_log_recs(cur, bp, ptr, numrecs - 1);
		}
		/*
		 * If it's the first record in the block, we'll need a key
		 * structure to pass up to the next level (updkey).
		 */
		if (ptr == 1) {
			key.ar_startblock = lrp->ar_startblock;
			key.ar_blockcount = lrp->ar_blockcount;
			lkp = &key;
		}
	}
	/*
	 * Decrement and log the number of entries in the block.
	 */
	numrecs--;
	block->bb_numrecs = cpu_to_be16(numrecs);
	xfs_alloc_log_block(cur->bc_tp, bp, XFS_BB_NUMRECS);
	/*
	 * See if the longest free extent in the allocation group was
	 * changed by this operation.  True if it's the by-size btree, and
	 * this is the leaf level, and there is no right sibling block,
	 * and this was the last record.
	 */
	agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp);
	mp = cur->bc_mp;

	if (level == 0 &&
	    cur->bc_btnum == XFS_BTNUM_CNT &&
	    be32_to_cpu(block->bb_rightsib) == NULLAGBLOCK &&
	    ptr > numrecs) {
		ASSERT(ptr == numrecs + 1);
		/*
		 * There are still records in the block.  Grab the size
		 * from the last one.
		 */
		if (numrecs) {
			rrp = XFS_ALLOC_REC_ADDR(block, numrecs, cur);
			agf->agf_longest = rrp->ar_blockcount;
		}
		/*
		 * No free extents left.
		 */
		else
			agf->agf_longest = 0;
		mp->m_perag[be32_to_cpu(agf->agf_seqno)].pagf_longest =
			be32_to_cpu(agf->agf_longest);
		xfs_alloc_log_agf(cur->bc_tp, cur->bc_private.a.agbp,
			XFS_AGF_LONGEST);
	}
	/*
	 * Is this the root level?  If so, we're almost done.
	 */
	if (level == cur->bc_nlevels - 1) {
		/*
		 * If this is the root level,
		 * and there's only one entry left,
		 * and it's NOT the leaf level,
		 * then we can get rid of this level.
		 */
		if (numrecs == 1 && level > 0) {
			/*
			 * lpp is still set to the first pointer in the block.
			 * Make it the new root of the btree.
			 */
			bno = be32_to_cpu(agf->agf_roots[cur->bc_btnum]);
			agf->agf_roots[cur->bc_btnum] = *lpp;
			be32_add_cpu(&agf->agf_levels[cur->bc_btnum], -1);
			mp->m_perag[be32_to_cpu(agf->agf_seqno)].pagf_levels[cur->bc_btnum]--;
			/*
			 * Put this buffer/block on the ag's freelist.
			 */
			error = xfs_alloc_put_freelist(cur->bc_tp,
					cur->bc_private.a.agbp, NULL, bno, 1);
			if (error)
				return error;
			/*
			 * Since blocks move to the free list without the
			 * coordination used in xfs_bmap_finish, we can't allow
			 * block to be available for reallocation and
			 * non-transaction writing (user data) until we know
			 * that the transaction that moved it to the free list
			 * is permanently on disk. We track the blocks by
			 * declaring these blocks as "busy"; the busy list is
			 * maintained on a per-ag basis and each transaction
			 * records which entries should be removed when the
			 * iclog commits to disk. If a busy block is
			 * allocated, the iclog is pushed up to the LSN
			 * that freed the block.
			 */
			xfs_alloc_mark_busy(cur->bc_tp,
				be32_to_cpu(agf->agf_seqno), bno, 1);

			xfs_trans_agbtree_delta(cur->bc_tp, -1);
			xfs_alloc_log_agf(cur->bc_tp, cur->bc_private.a.agbp,
				XFS_AGF_ROOTS | XFS_AGF_LEVELS);
			/*
			 * Update the cursor so there's one fewer level.
			 */
			xfs_btree_setbuf(cur, level, NULL);
			cur->bc_nlevels--;
		} else if (level > 0 &&
			   (error = xfs_btree_decrement(cur, level, &i)))
			return error;
		*stat = 1;
		return 0;
	}
	/*
	 * If we deleted the leftmost entry in the block, update the
	 * key values above us in the tree.
	 */
	if (ptr == 1 && (error = xfs_btree_updkey(cur, (union xfs_btree_key *)lkp, level + 1)))
		return error;
	/*
	 * If the number of records remaining in the block is at least
	 * the minimum, we're done.
	 */
	if (numrecs >= XFS_ALLOC_BLOCK_MINRECS(level, cur)) {
		if (level > 0 && (error = xfs_btree_decrement(cur, level, &i)))
			return error;
		*stat = 1;
		return 0;
	}
	/*
	 * Otherwise, we have to move some records around to keep the
	 * tree balanced.  Look at the left and right sibling blocks to
	 * see if we can re-balance by moving only one record.
	 */
	rbno = be32_to_cpu(block->bb_rightsib);
	lbno = be32_to_cpu(block->bb_leftsib);
	bno = NULLAGBLOCK;
	ASSERT(rbno != NULLAGBLOCK || lbno != NULLAGBLOCK);
	/*
	 * Duplicate the cursor so our btree manipulations here won't
	 * disrupt the next level up.
	 */
	if ((error = xfs_btree_dup_cursor(cur, &tcur)))
		return error;
	/*
	 * If there's a right sibling, see if it's ok to shift an entry
	 * out of it.
	 */
	if (rbno != NULLAGBLOCK) {
		/*
		 * Move the temp cursor to the last entry in the next block.
		 * Actually any entry but the first would suffice.
		 */
		i = xfs_btree_lastrec(tcur, level);
		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
		if ((error = xfs_btree_increment(tcur, level, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
		i = xfs_btree_lastrec(tcur, level);
		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
		/*
		 * Grab a pointer to the block.
		 */
		rbp = tcur->bc_bufs[level];
		right = XFS_BUF_TO_ALLOC_BLOCK(rbp);
#ifdef DEBUG
		if ((error = xfs_btree_check_sblock(cur, right, level, rbp)))
			goto error0;
#endif
		/*
		 * Grab the current block number, for future use.
		 */
		bno = be32_to_cpu(right->bb_leftsib);
		/*
		 * If right block is full enough so that removing one entry
		 * won't make it too empty, and left-shifting an entry out
		 * of right to us works, we're done.
		 */
		if (be16_to_cpu(right->bb_numrecs) - 1 >=
		    XFS_ALLOC_BLOCK_MINRECS(level, cur)) {
			if ((error = xfs_btree_lshift(tcur, level, &i)))
				goto error0;
			if (i) {
				ASSERT(be16_to_cpu(block->bb_numrecs) >=
				       XFS_ALLOC_BLOCK_MINRECS(level, cur));
				xfs_btree_del_cursor(tcur,
						     XFS_BTREE_NOERROR);
				if (level > 0 &&
				    (error = xfs_btree_decrement(cur, level,
					    &i)))
					return error;
				*stat = 1;
				return 0;
			}
		}
		/*
		 * Otherwise, grab the number of records in right for
		 * future reference, and fix up the temp cursor to point
		 * to our block again (last record).
		 */
		rrecs = be16_to_cpu(right->bb_numrecs);
		if (lbno != NULLAGBLOCK) {
			i = xfs_btree_firstrec(tcur, level);
			XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
			if ((error = xfs_btree_decrement(tcur, level, &i)))
				goto error0;
			XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
		}
	}
	/*
	 * If there's a left sibling, see if it's ok to shift an entry
	 * out of it.
	 */
	if (lbno != NULLAGBLOCK) {
		/*
		 * Move the temp cursor to the first entry in the
		 * previous block.
		 */
		i = xfs_btree_firstrec(tcur, level);
		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
		if ((error = xfs_btree_decrement(tcur, level, &i)))
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
		xfs_btree_firstrec(tcur, level);
		/*
		 * Grab a pointer to the block.
		 */
		lbp = tcur->bc_bufs[level];
		left = XFS_BUF_TO_ALLOC_BLOCK(lbp);
#ifdef DEBUG
		if ((error = xfs_btree_check_sblock(cur, left, level, lbp)))
			goto error0;
#endif
		/*
		 * Grab the current block number, for future use.
		 */
		bno = be32_to_cpu(left->bb_rightsib);
		/*
		 * If left block is full enough so that removing one entry
		 * won't make it too empty, and right-shifting an entry out
		 * of left to us works, we're done.
		 */
		if (be16_to_cpu(left->bb_numrecs) - 1 >=
		    XFS_ALLOC_BLOCK_MINRECS(level, cur)) {
			if ((error = xfs_btree_rshift(tcur, level, &i)))
				goto error0;
			if (i) {
				ASSERT(be16_to_cpu(block->bb_numrecs) >=
				       XFS_ALLOC_BLOCK_MINRECS(level, cur));
				xfs_btree_del_cursor(tcur,
						     XFS_BTREE_NOERROR);
				if (level == 0)
					cur->bc_ptrs[0]++;
				*stat = 1;
				return 0;
			}
		}
		/*
		 * Otherwise, grab the number of records in right for
		 * future reference.
		 */
		lrecs = be16_to_cpu(left->bb_numrecs);
	}
	/*
	 * Delete the temp cursor, we're done with it.
	 */
	xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);
	/*
	 * If here, we need to do a join to keep the tree balanced.
	 */
	ASSERT(bno != NULLAGBLOCK);
	/*
	 * See if we can join with the left neighbor block.
	 */
	if (lbno != NULLAGBLOCK &&
	    lrecs + numrecs <= XFS_ALLOC_BLOCK_MAXRECS(level, cur)) {
		/*
		 * Set "right" to be the starting block,
		 * "left" to be the left neighbor.
		 */
		rbno = bno;
		right = block;
		rrecs = be16_to_cpu(right->bb_numrecs);
		rbp = bp;
		if ((error = xfs_btree_read_bufs(mp, cur->bc_tp,
				cur->bc_private.a.agno, lbno, 0, &lbp,
				XFS_ALLOC_BTREE_REF)))
			return error;
		left = XFS_BUF_TO_ALLOC_BLOCK(lbp);
		lrecs = be16_to_cpu(left->bb_numrecs);
		if ((error = xfs_btree_check_sblock(cur, left, level, lbp)))
			return error;
	}
	/*
	 * If that won't work, see if we can join with the right neighbor block.
	 */
	else if (rbno != NULLAGBLOCK &&
		 rrecs + numrecs <= XFS_ALLOC_BLOCK_MAXRECS(level, cur)) {
		/*
		 * Set "left" to be the starting block,
		 * "right" to be the right neighbor.
		 */
		lbno = bno;
		left = block;
		lrecs = be16_to_cpu(left->bb_numrecs);
		lbp = bp;
		if ((error = xfs_btree_read_bufs(mp, cur->bc_tp,
				cur->bc_private.a.agno, rbno, 0, &rbp,
				XFS_ALLOC_BTREE_REF)))
			return error;
		right = XFS_BUF_TO_ALLOC_BLOCK(rbp);
		rrecs = be16_to_cpu(right->bb_numrecs);
		if ((error = xfs_btree_check_sblock(cur, right, level, rbp)))
			return error;
	}
	/*
	 * Otherwise, we can't fix the imbalance.
	 * Just return.  This is probably a logic error, but it's not fatal.
	 */
	else {
		if (level > 0 && (error = xfs_btree_decrement(cur, level, &i)))
			return error;
		*stat = 1;
		return 0;
	}
	/*
	 * We're now going to join "left" and "right" by moving all the stuff
	 * in "right" to "left" and deleting "right".
	 */
	if (level > 0) {
		/*
		 * It's a non-leaf.  Move keys and pointers.
		 */
		lkp = XFS_ALLOC_KEY_ADDR(left, lrecs + 1, cur);
		lpp = XFS_ALLOC_PTR_ADDR(left, lrecs + 1, cur);
		rkp = XFS_ALLOC_KEY_ADDR(right, 1, cur);
		rpp = XFS_ALLOC_PTR_ADDR(right, 1, cur);
#ifdef DEBUG
		for (i = 0; i < rrecs; i++) {
			if ((error = xfs_btree_check_sptr(cur, be32_to_cpu(rpp[i]), level)))
				return error;
		}
#endif
		memcpy(lkp, rkp, rrecs * sizeof(*lkp));
		memcpy(lpp, rpp, rrecs * sizeof(*lpp));
		xfs_alloc_log_keys(cur, lbp, lrecs + 1, lrecs + rrecs);
		xfs_alloc_log_ptrs(cur, lbp, lrecs + 1, lrecs + rrecs);
	} else {
		/*
		 * It's a leaf.  Move records.
		 */
		lrp = XFS_ALLOC_REC_ADDR(left, lrecs + 1, cur);
		rrp = XFS_ALLOC_REC_ADDR(right, 1, cur);
		memcpy(lrp, rrp, rrecs * sizeof(*lrp));
		xfs_alloc_log_recs(cur, lbp, lrecs + 1, lrecs + rrecs);
	}
	/*
	 * If we joined with the left neighbor, set the buffer in the
	 * cursor to the left block, and fix up the index.
	 */
	if (bp != lbp) {
		xfs_btree_setbuf(cur, level, lbp);
		cur->bc_ptrs[level] += lrecs;
	}
	/*
	 * If we joined with the right neighbor and there's a level above
	 * us, increment the cursor at that level.
	 */
	else if (level + 1 < cur->bc_nlevels &&
		 (error = xfs_btree_increment(cur, level + 1, &i)))
		return error;
	/*
	 * Fix up the number of records in the surviving block.
	 */
	lrecs += rrecs;
	left->bb_numrecs = cpu_to_be16(lrecs);
	/*
	 * Fix up the right block pointer in the surviving block, and log it.
	 */
	left->bb_rightsib = right->bb_rightsib;
	xfs_alloc_log_block(cur->bc_tp, lbp, XFS_BB_NUMRECS | XFS_BB_RIGHTSIB);
	/*
	 * If there is a right sibling now, make it point to the
	 * remaining block.
	 */
	if (be32_to_cpu(left->bb_rightsib) != NULLAGBLOCK) {
		xfs_alloc_block_t	*rrblock;
		xfs_buf_t		*rrbp;

		if ((error = xfs_btree_read_bufs(mp, cur->bc_tp,
				cur->bc_private.a.agno, be32_to_cpu(left->bb_rightsib), 0,
				&rrbp, XFS_ALLOC_BTREE_REF)))
			return error;
		rrblock = XFS_BUF_TO_ALLOC_BLOCK(rrbp);
		if ((error = xfs_btree_check_sblock(cur, rrblock, level, rrbp)))
			return error;
		rrblock->bb_leftsib = cpu_to_be32(lbno);
		xfs_alloc_log_block(cur->bc_tp, rrbp, XFS_BB_LEFTSIB);
	}
	/*
	 * Free the deleting block by putting it on the freelist.
	 */
	error = xfs_alloc_put_freelist(cur->bc_tp,
			cur->bc_private.a.agbp, NULL, rbno, 1);
	if (error)
		return error;
	/*
	 * Since blocks move to the free list without the coordination
	 * used in xfs_bmap_finish, we can't allow block to be available
	 * for reallocation and non-transaction writing (user data)
	 * until we know that the transaction that moved it to the free
	 * list is permanently on disk. We track the blocks by declaring
	 * these blocks as "busy"; the busy list is maintained on a
	 * per-ag basis and each transaction records which entries
	 * should be removed when the iclog commits to disk. If a
	 * busy block is allocated, the iclog is pushed up to the
	 * LSN that freed the block.
	 */
	xfs_alloc_mark_busy(cur->bc_tp, be32_to_cpu(agf->agf_seqno), bno, 1);
	xfs_trans_agbtree_delta(cur->bc_tp, -1);

	/*
	 * Adjust the current level's cursor so that we're left referring
	 * to the right node, after we're done.
	 * If this leaves the ptr value 0 our caller will fix it up.
	 */
	if (level > 0)
		cur->bc_ptrs[level]--;
	/*
	 * Return value means the next level up has something to do.
	 */
	*stat = 2;
	return 0;

error0:
	xfs_btree_del_cursor(tcur, XFS_BTREE_ERROR);
	return error;
}

/*
 * Insert one record/level.  Return information to the caller
 * allowing the next level up to proceed if necessary.
 */
STATIC int				/* error */
xfs_alloc_insrec(
	xfs_btree_cur_t		*cur,	/* btree cursor */
	int			level,	/* level to insert record at */
	xfs_agblock_t		*bnop,	/* i/o: block number inserted */
	xfs_alloc_rec_t		*recp,	/* i/o: record data inserted */
	xfs_btree_cur_t		**curp,	/* output: new cursor replacing cur */
	int			*stat)	/* output: success/failure */
{
	xfs_agf_t		*agf;	/* allocation group freelist header */
	xfs_alloc_block_t	*block;	/* btree block record/key lives in */
	xfs_buf_t		*bp;	/* buffer for block */
	int			error;	/* error return value */
	int			i;	/* loop index */
	xfs_alloc_key_t		key;	/* key value being inserted */
	xfs_alloc_key_t		*kp;	/* pointer to btree keys */
	xfs_agblock_t		nbno;	/* block number of allocated block */
	xfs_btree_cur_t		*ncur;	/* new cursor to be used at next lvl */
	xfs_alloc_key_t		nkey;	/* new key value, from split */
	xfs_alloc_rec_t		nrec;	/* new record value, for caller */
	int			numrecs;
	int			optr;	/* old ptr value */
	xfs_alloc_ptr_t		*pp;	/* pointer to btree addresses */
	int			ptr;	/* index in btree block for this rec */
	xfs_alloc_rec_t		*rp;	/* pointer to btree records */

	ASSERT(be32_to_cpu(recp->ar_blockcount) > 0);

	/*
	 * GCC doesn't understand the (arguably complex) control flow in
	 * this function and complains about uninitialized structure fields
	 * without this.
	 */
	memset(&nrec, 0, sizeof(nrec));

	/*
	 * If we made it to the root level, allocate a new root block
	 * and we're done.
	 */
	if (level >= cur->bc_nlevels) {
		XFS_STATS_INC(xs_abt_insrec);
		if ((error = xfs_alloc_newroot(cur, &i)))
			return error;
		*bnop = NULLAGBLOCK;
		*stat = i;
		return 0;
	}
	/*
	 * Make a key out of the record data to be inserted, and save it.
	 */
	key.ar_startblock = recp->ar_startblock;
	key.ar_blockcount = recp->ar_blockcount;
	optr = ptr = cur->bc_ptrs[level];
	/*
	 * If we're off the left edge, return failure.
	 */
	if (ptr == 0) {
		*stat = 0;
		return 0;
	}
	XFS_STATS_INC(xs_abt_insrec);
	/*
	 * Get pointers to the btree buffer and block.
	 */
	bp = cur->bc_bufs[level];
	block = XFS_BUF_TO_ALLOC_BLOCK(bp);
	numrecs = be16_to_cpu(block->bb_numrecs);
#ifdef DEBUG
	if ((error = xfs_btree_check_sblock(cur, block, level, bp)))
		return error;
	/*
	 * Check that the new entry is being inserted in the right place.
	 */
	if (ptr <= numrecs) {
		if (level == 0) {
			rp = XFS_ALLOC_REC_ADDR(block, ptr, cur);
			xfs_btree_check_rec(cur->bc_btnum, recp, rp);
		} else {
			kp = XFS_ALLOC_KEY_ADDR(block, ptr, cur);
			xfs_btree_check_key(cur->bc_btnum, &key, kp);
		}
	}
#endif
	nbno = NULLAGBLOCK;
	ncur = NULL;
	/*
	 * If the block is full, we can't insert the new entry until we
	 * make the block un-full.
	 */
	if (numrecs == XFS_ALLOC_BLOCK_MAXRECS(level, cur)) {
		/*
		 * First, try shifting an entry to the right neighbor.
		 */
		if ((error = xfs_btree_rshift(cur, level, &i)))
			return error;
		if (i) {
			/* nothing */
		}
		/*
		 * Next, try shifting an entry to the left neighbor.
		 */
		else {
			if ((error = xfs_btree_lshift(cur, level, &i)))
				return error;
			if (i)
				optr = ptr = cur->bc_ptrs[level];
			else {
				union xfs_btree_ptr bno = { .s = cpu_to_be32(nbno) };
				/*
				 * Next, try splitting the current block in
				 * half. If this works we have to re-set our
				 * variables because we could be in a
				 * different block now.
				 */
				if ((error = xfs_btree_split(cur, level, &bno,
						(union xfs_btree_key *)&nkey,
						&ncur, &i)))
					return error;
				nbno = be32_to_cpu(bno.s);
				if (i) {
					bp = cur->bc_bufs[level];
					block = XFS_BUF_TO_ALLOC_BLOCK(bp);
#ifdef DEBUG
					if ((error =
						xfs_btree_check_sblock(cur,
							block, level, bp)))
						return error;
#endif
					ptr = cur->bc_ptrs[level];
					nrec.ar_startblock = nkey.ar_startblock;
					nrec.ar_blockcount = nkey.ar_blockcount;
				}
				/*
				 * Otherwise the insert fails.
				 */
				else {
					*stat = 0;
					return 0;
				}
			}
		}
	}
	/*
	 * At this point we know there's room for our new entry in the block
	 * we're pointing at.
	 */
	numrecs = be16_to_cpu(block->bb_numrecs);
	if (level > 0) {
		/*
		 * It's a non-leaf entry.  Make a hole for the new data
		 * in the key and ptr regions of the block.
		 */
		kp = XFS_ALLOC_KEY_ADDR(block, 1, cur);
		pp = XFS_ALLOC_PTR_ADDR(block, 1, cur);
#ifdef DEBUG
		for (i = numrecs; i >= ptr; i--) {
			if ((error = xfs_btree_check_sptr(cur, be32_to_cpu(pp[i - 1]), level)))
				return error;
		}
#endif
		memmove(&kp[ptr], &kp[ptr - 1],
			(numrecs - ptr + 1) * sizeof(*kp));
		memmove(&pp[ptr], &pp[ptr - 1],
			(numrecs - ptr + 1) * sizeof(*pp));
#ifdef DEBUG
		if ((error = xfs_btree_check_sptr(cur, *bnop, level)))
			return error;
#endif
		/*
		 * Now stuff the new data in, bump numrecs and log the new data.
		 */
		kp[ptr - 1] = key;
		pp[ptr - 1] = cpu_to_be32(*bnop);
		numrecs++;
		block->bb_numrecs = cpu_to_be16(numrecs);
		xfs_alloc_log_keys(cur, bp, ptr, numrecs);
		xfs_alloc_log_ptrs(cur, bp, ptr, numrecs);
#ifdef DEBUG
		if (ptr < numrecs)
			xfs_btree_check_key(cur->bc_btnum, kp + ptr - 1,
				kp + ptr);
#endif
	} else {
		/*
		 * It's a leaf entry.  Make a hole for the new record.
		 */
		rp = XFS_ALLOC_REC_ADDR(block, 1, cur);
		memmove(&rp[ptr], &rp[ptr - 1],
			(numrecs - ptr + 1) * sizeof(*rp));
		/*
		 * Now stuff the new record in, bump numrecs
		 * and log the new data.
		 */
		rp[ptr - 1] = *recp;
		numrecs++;
		block->bb_numrecs = cpu_to_be16(numrecs);
		xfs_alloc_log_recs(cur, bp, ptr, numrecs);
#ifdef DEBUG
		if (ptr < numrecs)
			xfs_btree_check_rec(cur->bc_btnum, rp + ptr - 1,
				rp + ptr);
#endif
	}
	/*
	 * Log the new number of records in the btree header.
	 */
	xfs_alloc_log_block(cur->bc_tp, bp, XFS_BB_NUMRECS);
	/*
	 * If we inserted at the start of a block, update the parents' keys.
	 */
	if (optr == 1 && (error = xfs_btree_updkey(cur, (union xfs_btree_key *)&key, level + 1)))
		return error;
	/*
	 * Look to see if the longest extent in the allocation group
	 * needs to be updated.
	 */

	agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp);
	if (level == 0 &&
	    cur->bc_btnum == XFS_BTNUM_CNT &&
	    be32_to_cpu(block->bb_rightsib) == NULLAGBLOCK &&
	    be32_to_cpu(recp->ar_blockcount) > be32_to_cpu(agf->agf_longest)) {
		/*
		 * If this is a leaf in the by-size btree and there
		 * is no right sibling block and this block is bigger
		 * than the previous longest block, update it.
		 */
		agf->agf_longest = recp->ar_blockcount;
		cur->bc_mp->m_perag[be32_to_cpu(agf->agf_seqno)].pagf_longest
			= be32_to_cpu(recp->ar_blockcount);
		xfs_alloc_log_agf(cur->bc_tp, cur->bc_private.a.agbp,
			XFS_AGF_LONGEST);
	}
	/*
	 * Return the new block number, if any.
	 * If there is one, give back a record value and a cursor too.
	 */
	*bnop = nbno;
	if (nbno != NULLAGBLOCK) {
		*recp = nrec;
		*curp = ncur;
	}
	*stat = 1;
	return 0;
}

/*
 * Log header fields from a btree block.
 */
STATIC void
xfs_alloc_log_block(
	xfs_trans_t		*tp,	/* transaction pointer */
	xfs_buf_t		*bp,	/* buffer containing btree block */
	int			fields)	/* mask of fields: XFS_BB_... */
{
	int			first;	/* first byte offset logged */
	int			last;	/* last byte offset logged */
	static const short	offsets[] = {	/* table of offsets */
		offsetof(xfs_alloc_block_t, bb_magic),
		offsetof(xfs_alloc_block_t, bb_level),
		offsetof(xfs_alloc_block_t, bb_numrecs),
		offsetof(xfs_alloc_block_t, bb_leftsib),
		offsetof(xfs_alloc_block_t, bb_rightsib),
		sizeof(xfs_alloc_block_t)
	};

	xfs_btree_offsets(fields, offsets, XFS_BB_NUM_BITS, &first, &last);
	xfs_trans_log_buf(tp, bp, first, last);
}

/*
 * Log keys from a btree block (nonleaf).
 */
STATIC void
xfs_alloc_log_keys(
	xfs_btree_cur_t		*cur,	/* btree cursor */
	xfs_buf_t		*bp,	/* buffer containing btree block */
	int			kfirst,	/* index of first key to log */
	int			klast)	/* index of last key to log */
{
	xfs_alloc_block_t	*block;	/* btree block to log from */
	int			first;	/* first byte offset logged */
	xfs_alloc_key_t		*kp;	/* key pointer in btree block */
	int			last;	/* last byte offset logged */

	block = XFS_BUF_TO_ALLOC_BLOCK(bp);
	kp = XFS_ALLOC_KEY_ADDR(block, 1, cur);
	first = (int)((xfs_caddr_t)&kp[kfirst - 1] - (xfs_caddr_t)block);
	last = (int)(((xfs_caddr_t)&kp[klast] - 1) - (xfs_caddr_t)block);
	xfs_trans_log_buf(cur->bc_tp, bp, first, last);
}

/*
 * Log block pointer fields from a btree block (nonleaf).
 */
STATIC void
xfs_alloc_log_ptrs(
	xfs_btree_cur_t		*cur,	/* btree cursor */
	xfs_buf_t		*bp,	/* buffer containing btree block */
	int			pfirst,	/* index of first pointer to log */
	int			plast)	/* index of last pointer to log */
{
	xfs_alloc_block_t	*block;	/* btree block to log from */
	int			first;	/* first byte offset logged */
	int			last;	/* last byte offset logged */
	xfs_alloc_ptr_t		*pp;	/* block-pointer pointer in btree blk */

	block = XFS_BUF_TO_ALLOC_BLOCK(bp);
	pp = XFS_ALLOC_PTR_ADDR(block, 1, cur);
	first = (int)((xfs_caddr_t)&pp[pfirst - 1] - (xfs_caddr_t)block);
	last = (int)(((xfs_caddr_t)&pp[plast] - 1) - (xfs_caddr_t)block);
	xfs_trans_log_buf(cur->bc_tp, bp, first, last);
}

/*
 * Log records from a btree block (leaf).
 */
STATIC void
xfs_alloc_log_recs(
	xfs_btree_cur_t		*cur,	/* btree cursor */
	xfs_buf_t		*bp,	/* buffer containing btree block */
	int			rfirst,	/* index of first record to log */
	int			rlast)	/* index of last record to log */
{
	xfs_alloc_block_t	*block;	/* btree block to log from */
	int			first;	/* first byte offset logged */
	int			last;	/* last byte offset logged */
	xfs_alloc_rec_t		*rp;	/* record pointer for btree block */


	block = XFS_BUF_TO_ALLOC_BLOCK(bp);
	rp = XFS_ALLOC_REC_ADDR(block, 1, cur);
#ifdef DEBUG
	{
		xfs_agf_t	*agf;
		xfs_alloc_rec_t	*p;

		agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp);
		for (p = &rp[rfirst - 1]; p <= &rp[rlast - 1]; p++)
			ASSERT(be32_to_cpu(p->ar_startblock) +
			       be32_to_cpu(p->ar_blockcount) <=
			       be32_to_cpu(agf->agf_length));
	}
#endif
	first = (int)((xfs_caddr_t)&rp[rfirst - 1] - (xfs_caddr_t)block);
	last = (int)(((xfs_caddr_t)&rp[rlast] - 1) - (xfs_caddr_t)block);
	xfs_trans_log_buf(cur->bc_tp, bp, first, last);
}

/*
 * Allocate a new root block, fill it in.
 */
STATIC int				/* error */
xfs_alloc_newroot(
	xfs_btree_cur_t		*cur,	/* btree cursor */
	int			*stat)	/* success/failure */
{
	int			error;	/* error return value */
	xfs_agblock_t		lbno;	/* left block number */
	xfs_buf_t		*lbp;	/* left btree buffer */
	xfs_alloc_block_t	*left;	/* left btree block */
	xfs_mount_t		*mp;	/* mount structure */
	xfs_agblock_t		nbno;	/* new block number */
	xfs_buf_t		*nbp;	/* new (root) buffer */
	xfs_alloc_block_t	*new;	/* new (root) btree block */
	int			nptr;	/* new value for key index, 1 or 2 */
	xfs_agblock_t		rbno;	/* right block number */
	xfs_buf_t		*rbp;	/* right btree buffer */
	xfs_alloc_block_t	*right;	/* right btree block */

	mp = cur->bc_mp;

	ASSERT(cur->bc_nlevels < XFS_AG_MAXLEVELS(mp));
	/*
	 * Get a buffer from the freelist blocks, for the new root.
	 */
	error = xfs_alloc_get_freelist(cur->bc_tp,
			cur->bc_private.a.agbp, &nbno, 1);
	if (error)
		return error;
	/*
	 * None available, we fail.
	 */
	if (nbno == NULLAGBLOCK) {
		*stat = 0;
		return 0;
	}
	xfs_trans_agbtree_delta(cur->bc_tp, 1);
	nbp = xfs_btree_get_bufs(mp, cur->bc_tp, cur->bc_private.a.agno, nbno,
		0);
	new = XFS_BUF_TO_ALLOC_BLOCK(nbp);
	/*
	 * Set the root data in the a.g. freespace structure.
	 */
	{
		xfs_agf_t	*agf;	/* a.g. freespace header */
		xfs_agnumber_t	seqno;

		agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp);
		agf->agf_roots[cur->bc_btnum] = cpu_to_be32(nbno);
		be32_add_cpu(&agf->agf_levels[cur->bc_btnum], 1);
		seqno = be32_to_cpu(agf->agf_seqno);
		mp->m_perag[seqno].pagf_levels[cur->bc_btnum]++;
		xfs_alloc_log_agf(cur->bc_tp, cur->bc_private.a.agbp,
			XFS_AGF_ROOTS | XFS_AGF_LEVELS);
	}
	/*
	 * At the previous root level there are now two blocks: the old
	 * root, and the new block generated when it was split.
	 * We don't know which one the cursor is pointing at, so we
	 * set up variables "left" and "right" for each case.
	 */
	lbp = cur->bc_bufs[cur->bc_nlevels - 1];
	left = XFS_BUF_TO_ALLOC_BLOCK(lbp);
#ifdef DEBUG
	if ((error = xfs_btree_check_sblock(cur, left, cur->bc_nlevels - 1, lbp)))
		return error;
#endif
	if (be32_to_cpu(left->bb_rightsib) != NULLAGBLOCK) {
		/*
		 * Our block is left, pick up the right block.
		 */
		lbno = XFS_DADDR_TO_AGBNO(mp, XFS_BUF_ADDR(lbp));
		rbno = be32_to_cpu(left->bb_rightsib);
		if ((error = xfs_btree_read_bufs(mp, cur->bc_tp,
				cur->bc_private.a.agno, rbno, 0, &rbp,
				XFS_ALLOC_BTREE_REF)))
			return error;
		right = XFS_BUF_TO_ALLOC_BLOCK(rbp);
		if ((error = xfs_btree_check_sblock(cur, right,
				cur->bc_nlevels - 1, rbp)))
			return error;
		nptr = 1;
	} else {
		/*
		 * Our block is right, pick up the left block.
		 */
		rbp = lbp;
		right = left;
		rbno = XFS_DADDR_TO_AGBNO(mp, XFS_BUF_ADDR(rbp));
		lbno = be32_to_cpu(right->bb_leftsib);
		if ((error = xfs_btree_read_bufs(mp, cur->bc_tp,
				cur->bc_private.a.agno, lbno, 0, &lbp,
				XFS_ALLOC_BTREE_REF)))
			return error;
		left = XFS_BUF_TO_ALLOC_BLOCK(lbp);
		if ((error = xfs_btree_check_sblock(cur, left,
				cur->bc_nlevels - 1, lbp)))
			return error;
		nptr = 2;
	}
	/*
	 * Fill in the new block's btree header and log it.
	 */
	new->bb_magic = cpu_to_be32(xfs_magics[cur->bc_btnum]);
	new->bb_level = cpu_to_be16(cur->bc_nlevels);
	new->bb_numrecs = cpu_to_be16(2);
	new->bb_leftsib = cpu_to_be32(NULLAGBLOCK);
	new->bb_rightsib = cpu_to_be32(NULLAGBLOCK);
	xfs_alloc_log_block(cur->bc_tp, nbp, XFS_BB_ALL_BITS);
	ASSERT(lbno != NULLAGBLOCK && rbno != NULLAGBLOCK);
	/*
	 * Fill in the key data in the new root.
	 */
	{
		xfs_alloc_key_t		*kp;	/* btree key pointer */

		kp = XFS_ALLOC_KEY_ADDR(new, 1, cur);
		if (be16_to_cpu(left->bb_level) > 0) {
			kp[0] = *XFS_ALLOC_KEY_ADDR(left, 1, cur);
			kp[1] = *XFS_ALLOC_KEY_ADDR(right, 1, cur);
		} else {
			xfs_alloc_rec_t	*rp;	/* btree record pointer */

			rp = XFS_ALLOC_REC_ADDR(left, 1, cur);
			kp[0].ar_startblock = rp->ar_startblock;
			kp[0].ar_blockcount = rp->ar_blockcount;
			rp = XFS_ALLOC_REC_ADDR(right, 1, cur);
			kp[1].ar_startblock = rp->ar_startblock;
			kp[1].ar_blockcount = rp->ar_blockcount;
		}
	}
	xfs_alloc_log_keys(cur, nbp, 1, 2);
	/*
	 * Fill in the pointer data in the new root.
	 */
	{
		xfs_alloc_ptr_t		*pp;	/* btree address pointer */

		pp = XFS_ALLOC_PTR_ADDR(new, 1, cur);
		pp[0] = cpu_to_be32(lbno);
		pp[1] = cpu_to_be32(rbno);
	}
	xfs_alloc_log_ptrs(cur, nbp, 1, 2);
	/*
	 * Fix up the cursor.
	 */
	xfs_btree_setbuf(cur, cur->bc_nlevels, nbp);
	cur->bc_ptrs[cur->bc_nlevels] = nptr;
	cur->bc_nlevels++;
	*stat = 1;
	return 0;
}


/*
 * Externally visible routines.
 */

/*
 * Delete the record pointed to by cur.
 * The cursor refers to the place where the record was (could be inserted)
 * when the operation returns.
 */
int					/* error */
xfs_alloc_delete(
	xfs_btree_cur_t	*cur,		/* btree cursor */
	int		*stat)		/* success/failure */
{
	int		error;		/* error return value */
	int		i;		/* result code */
	int		level;		/* btree level */

	/*
	 * Go up the tree, starting at leaf level.
	 * If 2 is returned then a join was done; go to the next level.
	 * Otherwise we are done.
	 */
	for (level = 0, i = 2; i == 2; level++) {
		if ((error = xfs_alloc_delrec(cur, level, &i)))
			return error;
	}
	if (i == 0) {
		for (level = 1; level < cur->bc_nlevels; level++) {
			if (cur->bc_ptrs[level] == 0) {
				if ((error = xfs_btree_decrement(cur, level, &i)))
					return error;
				break;
			}
		}
	}
	*stat = i;
	return 0;
}
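
/*
 * Usage sketch (illustrative only, not part of the original callers): the
 * cursor must already point at the record to remove before
 * xfs_alloc_delete() is called, typically via an exact-match lookup.
 * Assuming a lookup helper along the lines of xfs_alloc_lookup_eq():
 *
 *	error = xfs_alloc_lookup_eq(cur, bno, len, &i);
 *	if (!error && i == 1)
 *		error = xfs_alloc_delete(cur, &i);
 */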

/*
 * Get the data from the pointed-to record.
 */
int					/* error */
xfs_alloc_get_rec(
	xfs_btree_cur_t		*cur,	/* btree cursor */
	xfs_agblock_t		*bno,	/* output: starting block of extent */
	xfs_extlen_t		*len,	/* output: length of extent */
	int			*stat)	/* output: success/failure */
{
	xfs_alloc_block_t	*block;	/* btree block */
#ifdef DEBUG
	int			error;	/* error return value */
#endif
	int			ptr;	/* record number */

	ptr = cur->bc_ptrs[0];
	block = XFS_BUF_TO_ALLOC_BLOCK(cur->bc_bufs[0]);
#ifdef DEBUG
	if ((error = xfs_btree_check_sblock(cur, block, 0, cur->bc_bufs[0])))
		return error;
#endif
	/*
	 * Off the right end or left end, return failure.
	 */
	if (ptr > be16_to_cpu(block->bb_numrecs) || ptr <= 0) {
		*stat = 0;
		return 0;
	}
	/*
	 * Point to the record and extract its data.
	 */
	{
		xfs_alloc_rec_t		*rec;	/* record data */

		rec = XFS_ALLOC_REC_ADDR(block, ptr, cur);
		*bno = be32_to_cpu(rec->ar_startblock);
		*len = be32_to_cpu(rec->ar_blockcount);
	}
	*stat = 1;
	return 0;
}
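
/*
 * Usage sketch (illustrative only, not part of the original file): leaf
 * records can be walked by pairing xfs_alloc_get_rec() with the generic
 * cursor increment used elsewhere in this file:
 *
 *	do {
 *		if ((error = xfs_alloc_get_rec(cur, &bno, &len, &i)) || !i)
 *			break;
 *		(process bno/len here)
 *	} while (!(error = xfs_btree_increment(cur, 0, &i)) && i);
 */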

/*
 * Insert the current record at the point referenced by cur.
 * The cursor may be inconsistent on return if splits have been done.
 */
int					/* error */
xfs_alloc_insert(
	xfs_btree_cur_t	*cur,		/* btree cursor */
	int		*stat)		/* success/failure */
{
	int		error;		/* error return value */
	int		i;		/* result value, 0 for failure */
	int		level;		/* current level number in btree */
	xfs_agblock_t	nbno;		/* new block number (split result) */
	xfs_btree_cur_t	*ncur;		/* new cursor (split result) */
	xfs_alloc_rec_t	nrec;		/* record being inserted this level */
	xfs_btree_cur_t	*pcur;		/* previous level's cursor */

	level = 0;
	nbno = NULLAGBLOCK;
	nrec.ar_startblock = cpu_to_be32(cur->bc_rec.a.ar_startblock);
	nrec.ar_blockcount = cpu_to_be32(cur->bc_rec.a.ar_blockcount);
	ncur = NULL;
	pcur = cur;
	/*
	 * Loop going up the tree, starting at the leaf level.
	 * Stop when we don't get a split block, that must mean that
	 * the insert is finished with this level.
	 */
	do {
		/*
		 * Insert nrec/nbno into this level of the tree.
		 * Note if we fail, nbno will be null.
		 */
		if ((error = xfs_alloc_insrec(pcur, level++, &nbno, &nrec, &ncur,
				&i))) {
			if (pcur != cur)
				xfs_btree_del_cursor(pcur, XFS_BTREE_ERROR);
			return error;
		}
		/*
		 * See if the cursor we just used is trash.
		 * Can't trash the caller's cursor, but otherwise we should
		 * if ncur is a new cursor or we're about to be done.
		 */
		if (pcur != cur && (ncur || nbno == NULLAGBLOCK)) {
			cur->bc_nlevels = pcur->bc_nlevels;
			xfs_btree_del_cursor(pcur, XFS_BTREE_NOERROR);
		}
		/*
		 * If we got a new cursor, switch to it.
		 */
		if (ncur) {
			pcur = ncur;
			ncur = NULL;
		}
	} while (nbno != NULLAGBLOCK);
	*stat = i;
	return 0;
}
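
/*
 * Usage sketch (illustrative only, not part of the original file):
 * xfs_alloc_insert() builds the record it inserts from cur->bc_rec.a (see
 * the nrec setup above), so the caller positions the cursor first; the
 * preceding lookup typically fills cur->bc_rec.a with the same values:
 *
 *	cur->bc_rec.a.ar_startblock = bno;
 *	cur->bc_rec.a.ar_blockcount = len;
 *	error = xfs_alloc_insert(cur, &i);
 */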

STATIC struct xfs_btree_cur *
xfs_allocbt_dup_cursor(
	struct xfs_btree_cur	*cur)
{
	return xfs_allocbt_init_cursor(cur->bc_mp, cur->bc_tp,
			cur->bc_private.a.agbp, cur->bc_private.a.agno,
			cur->bc_btnum);
}

STATIC int
xfs_allocbt_alloc_block(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*start,
	union xfs_btree_ptr	*new,
	int			length,
	int			*stat)
{
	int			error;
	xfs_agblock_t		bno;

	XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);

	/* Allocate the new block from the freelist. If we can't, give up.  */
	error = xfs_alloc_get_freelist(cur->bc_tp, cur->bc_private.a.agbp,
				       &bno, 1);
	if (error) {
		XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
		return error;
	}

	if (bno == NULLAGBLOCK) {
		XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
		*stat = 0;
		return 0;
	}

	xfs_trans_agbtree_delta(cur->bc_tp, 1);
	new->s = cpu_to_be32(bno);

	XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
	*stat = 1;
	return 0;
}

/*
 * Update the longest extent in the AGF
 */
STATIC void
xfs_allocbt_update_lastrec(
	struct xfs_btree_cur	*cur,
	struct xfs_btree_block	*block,
	union xfs_btree_rec	*rec,
	int			ptr,
	int			reason)
{
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp);
	xfs_agnumber_t		seqno = be32_to_cpu(agf->agf_seqno);
	__be32			len;

	ASSERT(cur->bc_btnum == XFS_BTNUM_CNT);

	switch (reason) {
	case LASTREC_UPDATE:
		/*
		 * If this is the last leaf block and it's the last record,
		 * then update the size of the longest extent in the AG.
		 */
		if (ptr != xfs_btree_get_numrecs(block))
			return;
		len = rec->alloc.ar_blockcount;
		break;
	default:
		ASSERT(0);
		return;
	}

	agf->agf_longest = len;
	cur->bc_mp->m_perag[seqno].pagf_longest = be32_to_cpu(len);
	xfs_alloc_log_agf(cur->bc_tp, cur->bc_private.a.agbp, XFS_AGF_LONGEST);
}

STATIC int
xfs_allocbt_get_maxrecs(
	struct xfs_btree_cur	*cur,
	int			level)
{
	return cur->bc_mp->m_alloc_mxr[level != 0];
}

STATIC void
xfs_allocbt_init_key_from_rec(
	union xfs_btree_key	*key,
	union xfs_btree_rec	*rec)
{
	ASSERT(rec->alloc.ar_startblock != 0);

	key->alloc.ar_startblock = rec->alloc.ar_startblock;
	key->alloc.ar_blockcount = rec->alloc.ar_blockcount;
}

STATIC void
xfs_allocbt_init_ptr_from_cur(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr)
{
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp);

	ASSERT(cur->bc_private.a.agno == be32_to_cpu(agf->agf_seqno));
	ASSERT(agf->agf_roots[cur->bc_btnum] != 0);

	ptr->s = agf->agf_roots[cur->bc_btnum];
}

STATIC __int64_t
xfs_allocbt_key_diff(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*key)
{
	xfs_alloc_rec_incore_t	*rec = &cur->bc_rec.a;
	xfs_alloc_key_t		*kp = &key->alloc;
	__int64_t		diff;

	if (cur->bc_btnum == XFS_BTNUM_BNO) {
		return (__int64_t)be32_to_cpu(kp->ar_startblock) -
				rec->ar_startblock;
	}

	diff = (__int64_t)be32_to_cpu(kp->ar_blockcount) - rec->ar_blockcount;
	if (diff)
		return diff;

	return (__int64_t)be32_to_cpu(kp->ar_startblock) - rec->ar_startblock;
}
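
/*
 * Ordering note, derived from xfs_allocbt_key_diff() above: the by-block
 * (XFS_BTNUM_BNO) btree is keyed on ar_startblock alone, while the by-size
 * (XFS_BTNUM_CNT) btree compares ar_blockcount first and breaks ties with
 * ar_startblock.
 */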

#ifdef XFS_BTREE_TRACE
ktrace_t	*xfs_allocbt_trace_buf;

STATIC void
xfs_allocbt_trace_enter(
	struct xfs_btree_cur	*cur,
	const char		*func,
	char			*s,
	int			type,
	int			line,
	__psunsigned_t		a0,
	__psunsigned_t		a1,
	__psunsigned_t		a2,
	__psunsigned_t		a3,
	__psunsigned_t		a4,
	__psunsigned_t		a5,
	__psunsigned_t		a6,
	__psunsigned_t		a7,
	__psunsigned_t		a8,
	__psunsigned_t		a9,
	__psunsigned_t		a10)
{
	ktrace_enter(xfs_allocbt_trace_buf, (void *)(__psint_t)type,
		(void *)func, (void *)s, NULL, (void *)cur,
		(void *)a0, (void *)a1, (void *)a2, (void *)a3,
		(void *)a4, (void *)a5, (void *)a6, (void *)a7,
		(void *)a8, (void *)a9, (void *)a10);
}

STATIC void
xfs_allocbt_trace_cursor(
	struct xfs_btree_cur	*cur,
	__uint32_t		*s0,
	__uint64_t		*l0,
	__uint64_t		*l1)
{
	*s0 = cur->bc_private.a.agno;
	*l0 = cur->bc_rec.a.ar_startblock;
	*l1 = cur->bc_rec.a.ar_blockcount;
}

STATIC void
xfs_allocbt_trace_key(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*key,
	__uint64_t		*l0,
	__uint64_t		*l1)
{
	*l0 = be32_to_cpu(key->alloc.ar_startblock);
	*l1 = be32_to_cpu(key->alloc.ar_blockcount);
}

STATIC void
xfs_allocbt_trace_record(
	struct xfs_btree_cur	*cur,
	union xfs_btree_rec	*rec,
	__uint64_t		*l0,
	__uint64_t		*l1,
	__uint64_t		*l2)
{
	*l0 = be32_to_cpu(rec->alloc.ar_startblock);
	*l1 = be32_to_cpu(rec->alloc.ar_blockcount);
	*l2 = 0;
}
#endif /* XFS_BTREE_TRACE */

static const struct xfs_btree_ops xfs_allocbt_ops = {
	.rec_len		= sizeof(xfs_alloc_rec_t),
	.key_len		= sizeof(xfs_alloc_key_t),

	.dup_cursor		= xfs_allocbt_dup_cursor,
	.alloc_block		= xfs_allocbt_alloc_block,
	.update_lastrec		= xfs_allocbt_update_lastrec,
	.get_maxrecs		= xfs_allocbt_get_maxrecs,
	.init_key_from_rec	= xfs_allocbt_init_key_from_rec,
	.init_ptr_from_cur	= xfs_allocbt_init_ptr_from_cur,
	.key_diff		= xfs_allocbt_key_diff,

#ifdef XFS_BTREE_TRACE
	.trace_enter		= xfs_allocbt_trace_enter,
	.trace_cursor		= xfs_allocbt_trace_cursor,
	.trace_key		= xfs_allocbt_trace_key,
	.trace_record		= xfs_allocbt_trace_record,
#endif
};

/*
 * Allocate a new allocation btree cursor.
 */
struct xfs_btree_cur *			/* new alloc btree cursor */
xfs_allocbt_init_cursor(
	struct xfs_mount	*mp,	/* file system mount point */
	struct xfs_trans	*tp,	/* transaction pointer */
	struct xfs_buf		*agbp,	/* buffer for agf structure */
	xfs_agnumber_t		agno,	/* allocation group number */
	xfs_btnum_t		btnum)	/* btree identifier */
{
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(agbp);
	struct xfs_btree_cur	*cur;

	ASSERT(btnum == XFS_BTNUM_BNO || btnum == XFS_BTNUM_CNT);

	cur = kmem_zone_zalloc(xfs_btree_cur_zone, KM_SLEEP);

	cur->bc_tp = tp;
	cur->bc_mp = mp;
	cur->bc_nlevels = be32_to_cpu(agf->agf_levels[btnum]);
	cur->bc_btnum = btnum;
	cur->bc_blocklog = mp->m_sb.sb_blocklog;

	cur->bc_ops = &xfs_allocbt_ops;
	if (btnum == XFS_BTNUM_CNT)
		cur->bc_flags = XFS_BTREE_LASTREC_UPDATE;

	cur->bc_private.a.agbp = agbp;
	cur->bc_private.a.agno = agno;

	return cur;
}