blob: 4570da07eb06aab3aba68d41ce67c71726570aab [file] [log] [blame]
Dave Chinner0b61f8a2018-06-05 19:42:14 -07001// SPDX-License-Identifier: GPL-2.0+
Darrick J. Wong6413a012016-10-03 09:11:25 -07002/*
3 * Copyright (C) 2016 Oracle. All Rights Reserved.
Darrick J. Wong6413a012016-10-03 09:11:25 -07004 * Author: Darrick J. Wong <darrick.wong@oracle.com>
Darrick J. Wong6413a012016-10-03 09:11:25 -07005 */
6#include "xfs.h"
7#include "xfs_fs.h"
8#include "xfs_format.h"
9#include "xfs_log_format.h"
10#include "xfs_trans_resv.h"
Darrick J. Wong77d61fe2016-10-03 09:11:26 -070011#include "xfs_bit.h"
Darrick J. Wong5467b342019-06-28 19:25:35 -070012#include "xfs_shared.h"
Darrick J. Wong6413a012016-10-03 09:11:25 -070013#include "xfs_mount.h"
Darrick J. Wong77d61fe2016-10-03 09:11:26 -070014#include "xfs_defer.h"
15#include "xfs_inode.h"
Darrick J. Wong6413a012016-10-03 09:11:25 -070016#include "xfs_trans.h"
17#include "xfs_trans_priv.h"
Darrick J. Wong6413a012016-10-03 09:11:25 -070018#include "xfs_bmap_item.h"
19#include "xfs_log.h"
Darrick J. Wong77d61fe2016-10-03 09:11:26 -070020#include "xfs_bmap.h"
21#include "xfs_icache.h"
Darrick J. Wongfe0be232017-04-12 12:26:07 -070022#include "xfs_bmap_btree.h"
23#include "xfs_trans_space.h"
Darrick J. Wonga5155b82019-11-02 09:40:53 -070024#include "xfs_error.h"
Darrick J. Wong3c6ba3c2020-05-01 16:00:50 -070025#include "xfs_log_priv.h"
Darrick J. Wong86ffa472020-05-01 16:00:45 -070026#include "xfs_log_recover.h"
Darrick J. Wong2dbf8722020-09-21 09:15:10 -070027#include "xfs_quota.h"
Darrick J. Wong6413a012016-10-03 09:11:25 -070028
/* Slab caches for the BUI (intent) and BUD (done) log items. */
kmem_zone_t	*xfs_bui_zone;
kmem_zone_t	*xfs_bud_zone;

/* Forward declaration; the BUI item ops are defined near the bottom. */
static const struct xfs_item_ops xfs_bui_item_ops;
33
/* Convert a generic log item pointer back to its containing BUI. */
static inline struct xfs_bui_log_item *BUI_ITEM(struct xfs_log_item *lip)
{
	return container_of(lip, struct xfs_bui_log_item, bui_item);
}
38
/* Return a BUI to its slab cache. */
STATIC void
xfs_bui_item_free(
	struct xfs_bui_log_item	*buip)
{
	kmem_cache_free(xfs_bui_zone, buip);
}
45
/*
 * Freeing the BUI requires that we remove it from the AIL if it has already
 * been placed there. However, the BUI may not yet have been placed in the AIL
 * when called by xfs_bui_release() from BUD processing due to the ordering of
 * committed vs unpin operations in bulk insert operations. Hence the reference
 * count to ensure only the last caller frees the BUI.
 */
STATIC void
xfs_bui_release(
	struct xfs_bui_log_item	*buip)
{
	ASSERT(atomic_read(&buip->bui_refcount) > 0);
	if (atomic_dec_and_test(&buip->bui_refcount)) {
		/* Last reference: pull the item out of the AIL and free it. */
		xfs_trans_ail_delete(&buip->bui_item, SHUTDOWN_LOG_IO_ERROR);
		xfs_bui_item_free(buip);
	}
}
63
64
Darrick J. Wong6413a012016-10-03 09:11:25 -070065STATIC void
66xfs_bui_item_size(
67 struct xfs_log_item *lip,
68 int *nvecs,
69 int *nbytes)
70{
71 struct xfs_bui_log_item *buip = BUI_ITEM(lip);
72
73 *nvecs += 1;
74 *nbytes += xfs_bui_log_format_sizeof(buip->bui_format.bui_nextents);
75}
76
/*
 * This is called to fill in the vector of log iovecs for the
 * given bui log item. We use only 1 iovec, and we point that
 * at the bui_log_format structure embedded in the bui item.
 * It is at this point that we assert that all of the extent
 * slots in the bui item have been filled.
 */
STATIC void
xfs_bui_item_format(
	struct xfs_log_item	*lip,
	struct xfs_log_vec	*lv)
{
	struct xfs_bui_log_item	*buip = BUI_ITEM(lip);
	struct xfs_log_iovec	*vecp = NULL;

	/* Every reserved extent slot must have been consumed by now. */
	ASSERT(atomic_read(&buip->bui_next_extent) ==
			buip->bui_format.bui_nextents);

	buip->bui_format.bui_type = XFS_LI_BUI;
	buip->bui_format.bui_size = 1;

	xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_BUI_FORMAT, &buip->bui_format,
			xfs_bui_log_format_sizeof(buip->bui_format.bui_nextents));
}
101
/*
 * The unpin operation is the last place an BUI is manipulated in the log. It is
 * either inserted in the AIL or aborted in the event of a log I/O error. In
 * either case, the BUI transaction has been successfully committed to make it
 * this far. Therefore, we expect whoever committed the BUI to either construct
 * and commit the BUD or drop the BUD's reference in the event of error. Simply
 * drop the log's BUI reference now that the log is done with it.
 */
STATIC void
xfs_bui_item_unpin(
	struct xfs_log_item	*lip,
	int			remove)
{
	struct xfs_bui_log_item	*buip = BUI_ITEM(lip);

	xfs_bui_release(buip);
}
119
/*
 * The BUI has been either committed or aborted if the transaction has been
 * cancelled. If the transaction was cancelled, an BUD isn't going to be
 * constructed and thus we free the BUI here directly.
 */
STATIC void
xfs_bui_item_release(
	struct xfs_log_item	*lip)
{
	xfs_bui_release(BUI_ITEM(lip));
}
131
/*
 * Allocate and initialize a bui item with the given number of extents.
 *
 * The item starts with two references: one for the creator (dropped when
 * the BUD is processed or the transaction aborts) and one for the log
 * (dropped at unpin time).
 */
STATIC struct xfs_bui_log_item *
xfs_bui_init(
	struct xfs_mount		*mp)

{
	struct xfs_bui_log_item		*buip;

	buip = kmem_cache_zalloc(xfs_bui_zone, GFP_KERNEL | __GFP_NOFAIL);

	xfs_log_item_init(mp, &buip->bui_item, XFS_LI_BUI, &xfs_bui_item_ops);
	buip->bui_format.bui_nextents = XFS_BUI_MAX_FAST_EXTENTS;
	/* The intent's address doubles as its unique id for BUD matching. */
	buip->bui_format.bui_id = (uintptr_t)(void *)buip;
	atomic_set(&buip->bui_next_extent, 0);
	atomic_set(&buip->bui_refcount, 2);

	return buip;
}
152
/* Convert a generic log item pointer back to its containing BUD. */
static inline struct xfs_bud_log_item *BUD_ITEM(struct xfs_log_item *lip)
{
	return container_of(lip, struct xfs_bud_log_item, bud_item);
}
157
158STATIC void
159xfs_bud_item_size(
160 struct xfs_log_item *lip,
161 int *nvecs,
162 int *nbytes)
163{
164 *nvecs += 1;
165 *nbytes += sizeof(struct xfs_bud_log_format);
166}
167
/*
 * This is called to fill in the vector of log iovecs for the
 * given bud log item. We use only 1 iovec, and we point that
 * at the bud_log_format structure embedded in the bud item.
 * It is at this point that we assert that all of the extent
 * slots in the bud item have been filled.
 */
STATIC void
xfs_bud_item_format(
	struct xfs_log_item	*lip,
	struct xfs_log_vec	*lv)
{
	struct xfs_bud_log_item	*budp = BUD_ITEM(lip);
	struct xfs_log_iovec	*vecp = NULL;

	budp->bud_format.bud_type = XFS_LI_BUD;
	budp->bud_format.bud_size = 1;

	xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_BUD_FORMAT, &budp->bud_format,
			sizeof(struct xfs_bud_log_format));
}
189
/*
 * The BUD is either committed or aborted if the transaction is cancelled. If
 * the transaction is cancelled, drop our reference to the BUI and free the
 * BUD.
 */
STATIC void
xfs_bud_item_release(
	struct xfs_log_item	*lip)
{
	struct xfs_bud_log_item	*budp = BUD_ITEM(lip);

	/* Drop the BUD's reference on its intent, then free the BUD itself. */
	xfs_bui_release(budp->bud_buip);
	kmem_cache_free(xfs_bud_zone, budp);
}
204
/*
 * Ops vector for the BUD (done) item.  BUDs carry no state of their own, so
 * they can be released as soon as the transaction commits to the CIL.
 */
static const struct xfs_item_ops xfs_bud_item_ops = {
	.flags		= XFS_ITEM_RELEASE_WHEN_COMMITTED,
	.iop_size	= xfs_bud_item_size,
	.iop_format	= xfs_bud_item_format,
	.iop_release	= xfs_bud_item_release,
};
211
/*
 * Allocate a BUD tied to the given BUI, link it to @buip by intent id,
 * and add it to the transaction.  The returned BUD is owned by the
 * transaction and is freed when the item is released.
 */
static struct xfs_bud_log_item *
xfs_trans_get_bud(
	struct xfs_trans		*tp,
	struct xfs_bui_log_item		*buip)
{
	struct xfs_bud_log_item		*budp;

	budp = kmem_cache_zalloc(xfs_bud_zone, GFP_KERNEL | __GFP_NOFAIL);
	xfs_log_item_init(tp->t_mountp, &budp->bud_item, XFS_LI_BUD,
			  &xfs_bud_item_ops);
	budp->bud_buip = buip;
	budp->bud_format.bud_bui_id = buip->bui_format.bui_id;

	xfs_trans_add_item(tp, &budp->bud_item);
	return budp;
}
Darrick J. Wong77d61fe2016-10-03 09:11:26 -0700228
/*
 * Finish a bmap update and log it to the BUD. Note that the
 * transaction is marked dirty regardless of whether the bmap update
 * succeeds or fails to support the BUI/BUD lifecycle rules.
 *
 * On return, *blockcount holds the number of blocks still left to map
 * or unmap (nonzero means the caller must requeue the remainder).
 */
static int
xfs_trans_log_finish_bmap_update(
	struct xfs_trans		*tp,
	struct xfs_bud_log_item		*budp,
	enum xfs_bmap_intent_type	type,
	struct xfs_inode		*ip,
	int				whichfork,
	xfs_fileoff_t			startoff,
	xfs_fsblock_t			startblock,
	xfs_filblks_t			*blockcount,
	xfs_exntst_t			state)
{
	int				error;

	error = xfs_bmap_finish_one(tp, ip, type, whichfork, startoff,
			startblock, blockcount, state);

	/*
	 * Mark the transaction dirty, even on error. This ensures the
	 * transaction is aborted, which:
	 *
	 * 1.) releases the BUI and frees the BUD
	 * 2.) shuts down the filesystem
	 */
	tp->t_flags |= XFS_TRANS_DIRTY;
	set_bit(XFS_LI_DIRTY, &budp->bud_item.li_flags);

	return error;
}
263
264/* Sort bmap intents by inode. */
265static int
266xfs_bmap_update_diff_items(
267 void *priv,
268 struct list_head *a,
269 struct list_head *b)
270{
271 struct xfs_bmap_intent *ba;
272 struct xfs_bmap_intent *bb;
273
274 ba = container_of(a, struct xfs_bmap_intent, bi_list);
275 bb = container_of(b, struct xfs_bmap_intent, bi_list);
276 return ba->bi_owner->i_ino - bb->bi_owner->i_ino;
277}
278
Christoph Hellwigcaeaea92019-06-28 19:29:42 -0700279/* Set the map extent flags for this mapping. */
280static void
281xfs_trans_set_bmap_flags(
282 struct xfs_map_extent *bmap,
283 enum xfs_bmap_intent_type type,
284 int whichfork,
285 xfs_exntst_t state)
286{
287 bmap->me_flags = 0;
288 switch (type) {
289 case XFS_BMAP_MAP:
290 case XFS_BMAP_UNMAP:
291 bmap->me_flags = type;
292 break;
293 default:
294 ASSERT(0);
295 }
296 if (state == XFS_EXT_UNWRITTEN)
297 bmap->me_flags |= XFS_BMAP_EXTENT_UNWRITTEN;
298 if (whichfork == XFS_ATTR_FORK)
299 bmap->me_flags |= XFS_BMAP_EXTENT_ATTR_FORK;
300}
301
/*
 * Log bmap updates in the intent item.  Copies one deferred bmap work
 * item into the next free slot of the BUI's extent array and dirties
 * both the transaction and the intent item.
 */
STATIC void
xfs_bmap_update_log_item(
	struct xfs_trans		*tp,
	struct xfs_bui_log_item		*buip,
	struct xfs_bmap_intent		*bmap)
{
	uint				next_extent;
	struct xfs_map_extent		*map;

	tp->t_flags |= XFS_TRANS_DIRTY;
	set_bit(XFS_LI_DIRTY, &buip->bui_item.li_flags);

	/*
	 * atomic_inc_return gives us the value after the increment;
	 * we want to use it as an array index so we need to subtract 1 from
	 * it.
	 */
	next_extent = atomic_inc_return(&buip->bui_next_extent) - 1;
	ASSERT(next_extent < buip->bui_format.bui_nextents);
	map = &buip->bui_format.bui_extents[next_extent];
	map->me_owner = bmap->bi_owner->i_ino;
	map->me_startblock = bmap->bi_bmap.br_startblock;
	map->me_startoff = bmap->bi_bmap.br_startoff;
	map->me_len = bmap->bi_bmap.br_blockcount;
	xfs_trans_set_bmap_flags(map, bmap->bi_type, bmap->bi_whichfork,
			bmap->bi_bmap.br_state);
}
330
/*
 * Create a BUI intent covering all bmap work items on @items, optionally
 * sorting them by inode first, and attach it to the transaction.
 * Returns the embedded log item of the new intent.
 */
static struct xfs_log_item *
xfs_bmap_update_create_intent(
	struct xfs_trans		*tp,
	struct list_head		*items,
	unsigned int			count,
	bool				sort)
{
	struct xfs_mount		*mp = tp->t_mountp;
	struct xfs_bui_log_item		*buip = xfs_bui_init(mp);
	struct xfs_bmap_intent		*bmap;

	/* BUIs carry exactly one extent slot, so only one item may be queued. */
	ASSERT(count == XFS_BUI_MAX_FAST_EXTENTS);

	xfs_trans_add_item(tp, &buip->bui_item);
	if (sort)
		list_sort(mp, items, xfs_bmap_update_diff_items);
	list_for_each_entry(bmap, items, bi_list)
		xfs_bmap_update_log_item(tp, buip, bmap);
	return &buip->bui_item;
}
351
/* Get a BUD so we can process all the deferred bmap updates. */
static struct xfs_log_item *
xfs_bmap_update_create_done(
	struct xfs_trans		*tp,
	struct xfs_log_item		*intent,
	unsigned int			count)
{
	return &xfs_trans_get_bud(tp, BUI_ITEM(intent))->bud_item;
}
361
/*
 * Process a deferred bmap update.  Returns -EAGAIN (keeping the work item
 * alive) when only part of the extent could be mapped/unmapped, so the
 * deferred-ops machinery requeues the remainder; otherwise frees the item.
 */
STATIC int
xfs_bmap_update_finish_item(
	struct xfs_trans		*tp,
	struct xfs_log_item		*done,
	struct list_head		*item,
	struct xfs_btree_cur		**state)
{
	struct xfs_bmap_intent		*bmap;
	xfs_filblks_t			count;
	int				error;

	bmap = container_of(item, struct xfs_bmap_intent, bi_list);
	count = bmap->bi_bmap.br_blockcount;
	error = xfs_trans_log_finish_bmap_update(tp, BUD_ITEM(done),
			bmap->bi_type,
			bmap->bi_owner, bmap->bi_whichfork,
			bmap->bi_bmap.br_startoff,
			bmap->bi_bmap.br_startblock,
			&count,
			bmap->bi_bmap.br_state);
	if (!error && count > 0) {
		/* Partial progress is only expected when unmapping. */
		ASSERT(bmap->bi_type == XFS_BMAP_UNMAP);
		bmap->bi_bmap.br_blockcount = count;
		return -EAGAIN;
	}
	kmem_free(bmap);
	return error;
}
391
/* Abort all pending BUIs by dropping the intent's creator reference. */
STATIC void
xfs_bmap_update_abort_intent(
	struct xfs_log_item		*intent)
{
	xfs_bui_release(BUI_ITEM(intent));
}
399
400/* Cancel a deferred rmap update. */
401STATIC void
402xfs_bmap_update_cancel_item(
403 struct list_head *item)
404{
405 struct xfs_bmap_intent *bmap;
406
407 bmap = container_of(item, struct xfs_bmap_intent, bi_list);
408 kmem_free(bmap);
409}
410
/* Deferred-operation dispatch table for bmap (file mapping) updates. */
const struct xfs_defer_op_type xfs_bmap_update_defer_type = {
	.max_items	= XFS_BUI_MAX_FAST_EXTENTS,
	.create_intent	= xfs_bmap_update_create_intent,
	.abort_intent	= xfs_bmap_update_abort_intent,
	.create_done	= xfs_bmap_update_create_done,
	.finish_item	= xfs_bmap_update_finish_item,
	.cancel_item	= xfs_bmap_update_cancel_item,
};
419
/*
 * Process a bmap update intent item that was recovered from the log.
 * We need to update some inode's bmbt.
 *
 * Validates the single extent recorded in the BUI, grabs the owning inode,
 * replays the map/unmap in a fresh transaction, and captures any remaining
 * deferred work (plus the inode) on @capture_list for later replay.
 */
STATIC int
xfs_bui_item_recover(
	struct xfs_log_item		*lip,
	struct list_head		*capture_list)
{
	struct xfs_bmbt_irec		irec;
	struct xfs_bui_log_item		*buip = BUI_ITEM(lip);
	struct xfs_trans		*tp;
	struct xfs_inode		*ip = NULL;
	struct xfs_mount		*mp = lip->li_mountp;
	struct xfs_map_extent		*bmap;
	struct xfs_bud_log_item		*budp;
	xfs_fsblock_t			startblock_fsb;
	xfs_fsblock_t			inode_fsb;
	xfs_filblks_t			count;
	xfs_exntst_t			state;
	unsigned int			bui_type;
	int				whichfork;
	int				error = 0;

	/* Only one mapping operation per BUI... */
	if (buip->bui_format.bui_nextents != XFS_BUI_MAX_FAST_EXTENTS)
		return -EFSCORRUPTED;

	/*
	 * First check the validity of the extent described by the
	 * BUI. If anything is bad, then toss the BUI.
	 */
	bmap = &buip->bui_format.bui_extents[0];
	startblock_fsb = XFS_BB_TO_FSB(mp,
			   XFS_FSB_TO_DADDR(mp, bmap->me_startblock));
	inode_fsb = XFS_BB_TO_FSB(mp, XFS_FSB_TO_DADDR(mp,
			XFS_INO_TO_FSB(mp, bmap->me_owner)));
	state = (bmap->me_flags & XFS_BMAP_EXTENT_UNWRITTEN) ?
			XFS_EXT_UNWRITTEN : XFS_EXT_NORM;
	whichfork = (bmap->me_flags & XFS_BMAP_EXTENT_ATTR_FORK) ?
			XFS_ATTR_FORK : XFS_DATA_FORK;
	bui_type = bmap->me_flags & XFS_BMAP_EXTENT_TYPE_MASK;
	switch (bui_type) {
	case XFS_BMAP_MAP:
	case XFS_BMAP_UNMAP:
		break;
	default:
		return -EFSCORRUPTED;
	}
	if (startblock_fsb == 0 ||
	    bmap->me_len == 0 ||
	    inode_fsb == 0 ||
	    startblock_fsb >= mp->m_sb.sb_dblocks ||
	    bmap->me_len >= mp->m_sb.sb_agblocks ||
	    inode_fsb >= mp->m_sb.sb_dblocks ||
	    (bmap->me_flags & ~XFS_BMAP_EXTENT_FLAGS))
		return -EFSCORRUPTED;

	/* Grab the inode. */
	error = xfs_iget(mp, NULL, bmap->me_owner, 0, 0, &ip);
	if (error)
		return error;

	error = xfs_qm_dqattach(ip);
	if (error)
		goto err_rele;

	/* Unlinked inodes must be cleaned up at the end of recovery. */
	if (VFS_I(ip)->i_nlink == 0)
		xfs_iflags_set(ip, XFS_IRECOVERY);

	/* Allocate transaction and do the work. */
	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate,
			XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK), 0, 0, &tp);
	if (error)
		goto err_rele;

	budp = xfs_trans_get_bud(tp, buip);
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, 0);

	count = bmap->me_len;
	error = xfs_trans_log_finish_bmap_update(tp, budp, bui_type, ip,
			whichfork, bmap->me_startoff, bmap->me_startblock,
			&count, state);
	if (error)
		goto err_cancel;

	if (count > 0) {
		/* Requeue the leftover unmap work as a fresh intent. */
		ASSERT(bui_type == XFS_BMAP_UNMAP);
		irec.br_startblock = bmap->me_startblock;
		irec.br_blockcount = count;
		irec.br_startoff = bmap->me_startoff;
		irec.br_state = state;
		xfs_bmap_unmap_extent(tp, ip, &irec);
	}

	/*
	 * Commit transaction, which frees the transaction and saves the inode
	 * for later replay activities.
	 */
	error = xfs_defer_ops_capture_and_commit(tp, ip, capture_list);
	if (error)
		goto err_unlock;

	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	xfs_irele(ip);
	return 0;

err_cancel:
	xfs_trans_cancel(tp);
err_unlock:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
err_rele:
	xfs_irele(ip);
	return error;
}
Darrick J. Wong86ffa472020-05-01 16:00:45 -0700536
/* Match a recovered BUI against the intent id logged in a BUD. */
STATIC bool
xfs_bui_item_match(
	struct xfs_log_item	*lip,
	uint64_t		intent_id)
{
	return BUI_ITEM(lip)->bui_format.bui_id == intent_id;
}
544
/* Ops vector for the BUI (intent) item. */
static const struct xfs_item_ops xfs_bui_item_ops = {
	.iop_size	= xfs_bui_item_size,
	.iop_format	= xfs_bui_item_format,
	.iop_unpin	= xfs_bui_item_unpin,
	.iop_release	= xfs_bui_item_release,
	.iop_recover	= xfs_bui_item_recover,
	.iop_match	= xfs_bui_item_match,
};
553
/*
 * Copy an BUI format buffer from the given buf, and into the destination
 * BUI format structure. The BUI/BUD items were designed not to need any
 * special alignment handling.
 *
 * Returns -EFSCORRUPTED if the recorded buffer length does not match the
 * size implied by the extent count.
 */
static int
xfs_bui_copy_format(
	struct xfs_log_iovec		*buf,
	struct xfs_bui_log_format	*dst_bui_fmt)
{
	struct xfs_bui_log_format	*src_bui_fmt;
	uint				len;

	src_bui_fmt = buf->i_addr;
	len = xfs_bui_log_format_sizeof(src_bui_fmt->bui_nextents);

	if (buf->i_len == len) {
		memcpy(dst_bui_fmt, src_bui_fmt, len);
		return 0;
	}
	XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, NULL);
	return -EFSCORRUPTED;
}
577
/*
 * This routine is called to create an in-core extent bmap update
 * item from the bui format structure which was logged on disk.
 * It allocates an in-core bui, copies the extents from the format
 * structure into it, and adds the bui to the AIL with the given
 * LSN.
 */
STATIC int
xlog_recover_bui_commit_pass2(
	struct xlog			*log,
	struct list_head		*buffer_list,
	struct xlog_recover_item	*item,
	xfs_lsn_t			lsn)
{
	int				error;
	struct xfs_mount		*mp = log->l_mp;
	struct xfs_bui_log_item		*buip;
	struct xfs_bui_log_format	*bui_formatp;

	bui_formatp = item->ri_buf[0].i_addr;

	/* Recovered BUIs must carry exactly one extent. */
	if (bui_formatp->bui_nextents != XFS_BUI_MAX_FAST_EXTENTS) {
		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, log->l_mp);
		return -EFSCORRUPTED;
	}
	buip = xfs_bui_init(mp);
	error = xfs_bui_copy_format(&item->ri_buf[0], &buip->bui_format);
	if (error) {
		xfs_bui_item_free(buip);
		return error;
	}
	atomic_set(&buip->bui_next_extent, bui_formatp->bui_nextents);
	/*
	 * Insert the intent into the AIL directly and drop one reference so
	 * that finishing or canceling the work will drop the other.
	 */
	xfs_trans_ail_insert(log->l_ailp, &buip->bui_item, lsn);
	xfs_bui_release(buip);
	return 0;
}
618
/* Log-recovery dispatch entry for BUI items. */
const struct xlog_recover_item_ops xlog_bui_item_ops = {
	.item_type		= XFS_LI_BUI,
	.commit_pass2		= xlog_recover_bui_commit_pass2,
};
623
/*
 * This routine is called when an BUD format structure is found in a committed
 * transaction in the log. Its purpose is to cancel the corresponding BUI if it
 * was still in the log. To do this it searches the AIL for the BUI with an id
 * equal to that in the BUD format structure. If we find it we drop the BUD
 * reference, which removes the BUI from the AIL and frees it.
 */
STATIC int
xlog_recover_bud_commit_pass2(
	struct xlog			*log,
	struct list_head		*buffer_list,
	struct xlog_recover_item	*item,
	xfs_lsn_t			lsn)
{
	struct xfs_bud_log_format	*bud_formatp;

	bud_formatp = item->ri_buf[0].i_addr;
	/* BUDs are fixed size; anything else is corruption. */
	if (item->ri_buf[0].i_len != sizeof(struct xfs_bud_log_format)) {
		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, log->l_mp);
		return -EFSCORRUPTED;
	}

	xlog_recover_release_intent(log, XFS_LI_BUI, bud_formatp->bud_bui_id);
	return 0;
}
649
/* Log-recovery dispatch entry for BUD items. */
const struct xlog_recover_item_ops xlog_bud_item_ops = {
	.item_type		= XFS_LI_BUD,
	.commit_pass2		= xlog_recover_bud_commit_pass2,
};
Darrick J. Wong86ffa472020-05-01 16:00:45 -0700653};