blob: b3996f361b87c5cd8c733eaee324f4de1c5c0d95 [file] [log] [blame]
Dave Chinner0b61f8a2018-06-05 19:42:14 -07001// SPDX-License-Identifier: GPL-2.0+
Darrick J. Wong6413a012016-10-03 09:11:25 -07002/*
3 * Copyright (C) 2016 Oracle. All Rights Reserved.
Darrick J. Wong6413a012016-10-03 09:11:25 -07004 * Author: Darrick J. Wong <darrick.wong@oracle.com>
Darrick J. Wong6413a012016-10-03 09:11:25 -07005 */
6#include "xfs.h"
7#include "xfs_fs.h"
8#include "xfs_format.h"
9#include "xfs_log_format.h"
10#include "xfs_trans_resv.h"
Darrick J. Wong77d61fe2016-10-03 09:11:26 -070011#include "xfs_bit.h"
Darrick J. Wong5467b342019-06-28 19:25:35 -070012#include "xfs_shared.h"
Darrick J. Wong6413a012016-10-03 09:11:25 -070013#include "xfs_mount.h"
Darrick J. Wong77d61fe2016-10-03 09:11:26 -070014#include "xfs_defer.h"
15#include "xfs_inode.h"
Darrick J. Wong6413a012016-10-03 09:11:25 -070016#include "xfs_trans.h"
17#include "xfs_trans_priv.h"
Darrick J. Wong6413a012016-10-03 09:11:25 -070018#include "xfs_bmap_item.h"
19#include "xfs_log.h"
Darrick J. Wong77d61fe2016-10-03 09:11:26 -070020#include "xfs_bmap.h"
21#include "xfs_icache.h"
Darrick J. Wongfe0be232017-04-12 12:26:07 -070022#include "xfs_bmap_btree.h"
23#include "xfs_trans_space.h"
Darrick J. Wonga5155b82019-11-02 09:40:53 -070024#include "xfs_error.h"
Darrick J. Wong3c6ba3c2020-05-01 16:00:50 -070025#include "xfs_log_priv.h"
Darrick J. Wong86ffa472020-05-01 16:00:45 -070026#include "xfs_log_recover.h"
Darrick J. Wong6413a012016-10-03 09:11:25 -070027
/* Allocation caches for the in-core BUI/BUD log items (set up elsewhere). */
kmem_zone_t	*xfs_bui_zone;
kmem_zone_t	*xfs_bud_zone;

/* Forward declaration; the BUI ops table is defined after the recovery code. */
static const struct xfs_item_ops xfs_bui_item_ops;
32
/* Convert a generic log item back to its containing BUI log item. */
static inline struct xfs_bui_log_item *BUI_ITEM(struct xfs_log_item *lip)
{
	return container_of(lip, struct xfs_bui_log_item, bui_item);
}
37
/* Free an in-core BUI log item back to its slab cache. */
STATIC void
xfs_bui_item_free(
	struct xfs_bui_log_item	*buip)
{
	kmem_cache_free(xfs_bui_zone, buip);
}
44
Dave Chinner0612d112018-04-02 20:08:27 -070045/*
46 * Freeing the BUI requires that we remove it from the AIL if it has already
47 * been placed there. However, the BUI may not yet have been placed in the AIL
48 * when called by xfs_bui_release() from BUD processing due to the ordering of
49 * committed vs unpin operations in bulk insert operations. Hence the reference
50 * count to ensure only the last caller frees the BUI.
51 */
Darrick J. Wong9329ba82020-05-01 16:00:52 -070052STATIC void
Dave Chinner0612d112018-04-02 20:08:27 -070053xfs_bui_release(
54 struct xfs_bui_log_item *buip)
55{
56 ASSERT(atomic_read(&buip->bui_refcount) > 0);
57 if (atomic_dec_and_test(&buip->bui_refcount)) {
Brian Foster65587922020-05-06 13:25:23 -070058 xfs_trans_ail_delete(&buip->bui_item, SHUTDOWN_LOG_IO_ERROR);
Dave Chinner0612d112018-04-02 20:08:27 -070059 xfs_bui_item_free(buip);
60 }
61}
62
63
Darrick J. Wong6413a012016-10-03 09:11:25 -070064STATIC void
65xfs_bui_item_size(
66 struct xfs_log_item *lip,
67 int *nvecs,
68 int *nbytes)
69{
70 struct xfs_bui_log_item *buip = BUI_ITEM(lip);
71
72 *nvecs += 1;
73 *nbytes += xfs_bui_log_format_sizeof(buip->bui_format.bui_nextents);
74}
75
76/*
77 * This is called to fill in the vector of log iovecs for the
78 * given bui log item. We use only 1 iovec, and we point that
79 * at the bui_log_format structure embedded in the bui item.
80 * It is at this point that we assert that all of the extent
81 * slots in the bui item have been filled.
82 */
83STATIC void
84xfs_bui_item_format(
85 struct xfs_log_item *lip,
86 struct xfs_log_vec *lv)
87{
88 struct xfs_bui_log_item *buip = BUI_ITEM(lip);
89 struct xfs_log_iovec *vecp = NULL;
90
91 ASSERT(atomic_read(&buip->bui_next_extent) ==
92 buip->bui_format.bui_nextents);
93
94 buip->bui_format.bui_type = XFS_LI_BUI;
95 buip->bui_format.bui_size = 1;
96
97 xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_BUI_FORMAT, &buip->bui_format,
98 xfs_bui_log_format_sizeof(buip->bui_format.bui_nextents));
99}
100
101/*
Darrick J. Wong6413a012016-10-03 09:11:25 -0700102 * The unpin operation is the last place an BUI is manipulated in the log. It is
103 * either inserted in the AIL or aborted in the event of a log I/O error. In
104 * either case, the BUI transaction has been successfully committed to make it
105 * this far. Therefore, we expect whoever committed the BUI to either construct
106 * and commit the BUD or drop the BUD's reference in the event of error. Simply
107 * drop the log's BUI reference now that the log is done with it.
108 */
109STATIC void
110xfs_bui_item_unpin(
111 struct xfs_log_item *lip,
112 int remove)
113{
114 struct xfs_bui_log_item *buip = BUI_ITEM(lip);
115
116 xfs_bui_release(buip);
117}
118
119/*
Darrick J. Wong6413a012016-10-03 09:11:25 -0700120 * The BUI has been either committed or aborted if the transaction has been
121 * cancelled. If the transaction was cancelled, an BUD isn't going to be
122 * constructed and thus we free the BUI here directly.
123 */
124STATIC void
Christoph Hellwigddf92052019-06-28 19:27:32 -0700125xfs_bui_item_release(
Darrick J. Wong6413a012016-10-03 09:11:25 -0700126 struct xfs_log_item *lip)
127{
Christoph Hellwigddf92052019-06-28 19:27:32 -0700128 xfs_bui_release(BUI_ITEM(lip));
Darrick J. Wong6413a012016-10-03 09:11:25 -0700129}
130
Darrick J. Wong6413a012016-10-03 09:11:25 -0700131/*
132 * Allocate and initialize an bui item with the given number of extents.
133 */
Darrick J. Wong3c6ba3c2020-05-01 16:00:50 -0700134STATIC struct xfs_bui_log_item *
Darrick J. Wong6413a012016-10-03 09:11:25 -0700135xfs_bui_init(
136 struct xfs_mount *mp)
137
138{
139 struct xfs_bui_log_item *buip;
140
Tetsuo Handa707e0dd2019-08-26 12:06:22 -0700141 buip = kmem_zone_zalloc(xfs_bui_zone, 0);
Darrick J. Wong6413a012016-10-03 09:11:25 -0700142
143 xfs_log_item_init(mp, &buip->bui_item, XFS_LI_BUI, &xfs_bui_item_ops);
144 buip->bui_format.bui_nextents = XFS_BUI_MAX_FAST_EXTENTS;
145 buip->bui_format.bui_id = (uintptr_t)(void *)buip;
146 atomic_set(&buip->bui_next_extent, 0);
147 atomic_set(&buip->bui_refcount, 2);
148
149 return buip;
150}
151
/* Convert a generic log item back to its containing BUD log item. */
static inline struct xfs_bud_log_item *BUD_ITEM(struct xfs_log_item *lip)
{
	return container_of(lip, struct xfs_bud_log_item, bud_item);
}
156
/*
 * Report the number of log iovecs and log space needed for a BUD: one
 * iovec holding the fixed-size bud log format structure.
 */
STATIC void
xfs_bud_item_size(
	struct xfs_log_item	*lip,
	int			*nvecs,
	int			*nbytes)
{
	*nvecs += 1;
	*nbytes += sizeof(struct xfs_bud_log_format);
}
166
167/*
168 * This is called to fill in the vector of log iovecs for the
169 * given bud log item. We use only 1 iovec, and we point that
170 * at the bud_log_format structure embedded in the bud item.
171 * It is at this point that we assert that all of the extent
172 * slots in the bud item have been filled.
173 */
174STATIC void
175xfs_bud_item_format(
176 struct xfs_log_item *lip,
177 struct xfs_log_vec *lv)
178{
179 struct xfs_bud_log_item *budp = BUD_ITEM(lip);
180 struct xfs_log_iovec *vecp = NULL;
181
182 budp->bud_format.bud_type = XFS_LI_BUD;
183 budp->bud_format.bud_size = 1;
184
185 xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_BUD_FORMAT, &budp->bud_format,
186 sizeof(struct xfs_bud_log_format));
187}
188
/*
 * The BUD is either committed or aborted if the transaction is cancelled.
 * If the transaction is cancelled, drop our reference to the BUI and free
 * the BUD.
 */
STATIC void
xfs_bud_item_release(
	struct xfs_log_item	*lip)
{
	struct xfs_bud_log_item	*budp = BUD_ITEM(lip);

	/* Drop the BUD's reference on its parent intent before freeing. */
	xfs_bui_release(budp->bud_buip);
	kmem_cache_free(xfs_bud_zone, budp);
}
203
/*
 * Log item operations for the BUD.  BUDs are released as soon as the
 * transaction that logged them commits (XFS_ITEM_RELEASE_WHEN_COMMITTED),
 * so they need no unpin or AIL handling of their own.
 */
static const struct xfs_item_ops xfs_bud_item_ops = {
	.flags		= XFS_ITEM_RELEASE_WHEN_COMMITTED,
	.iop_size	= xfs_bud_item_size,
	.iop_format	= xfs_bud_item_format,
	.iop_release	= xfs_bud_item_release,
};
210
Christoph Hellwigcaeaea92019-06-28 19:29:42 -0700211static struct xfs_bud_log_item *
Christoph Hellwig73f0d232019-06-28 19:27:36 -0700212xfs_trans_get_bud(
213 struct xfs_trans *tp,
Darrick J. Wong6413a012016-10-03 09:11:25 -0700214 struct xfs_bui_log_item *buip)
Darrick J. Wong6413a012016-10-03 09:11:25 -0700215{
Christoph Hellwig73f0d232019-06-28 19:27:36 -0700216 struct xfs_bud_log_item *budp;
Darrick J. Wong6413a012016-10-03 09:11:25 -0700217
Tetsuo Handa707e0dd2019-08-26 12:06:22 -0700218 budp = kmem_zone_zalloc(xfs_bud_zone, 0);
Christoph Hellwig73f0d232019-06-28 19:27:36 -0700219 xfs_log_item_init(tp->t_mountp, &budp->bud_item, XFS_LI_BUD,
220 &xfs_bud_item_ops);
Darrick J. Wong6413a012016-10-03 09:11:25 -0700221 budp->bud_buip = buip;
222 budp->bud_format.bud_bui_id = buip->bui_format.bui_id;
223
Christoph Hellwig73f0d232019-06-28 19:27:36 -0700224 xfs_trans_add_item(tp, &budp->bud_item);
Darrick J. Wong6413a012016-10-03 09:11:25 -0700225 return budp;
226}
Darrick J. Wong77d61fe2016-10-03 09:11:26 -0700227
/*
 * Finish a bmap update and log it to the BUD.  Note that the transaction
 * is marked dirty regardless of whether the bmap update succeeds or fails
 * to support the BUI/BUD lifecycle rules.
 *
 * @blockcount is in/out: on return it holds the number of blocks that
 * still remain to be processed (used by callers to requeue partial work).
 */
static int
xfs_trans_log_finish_bmap_update(
	struct xfs_trans		*tp,
	struct xfs_bud_log_item		*budp,
	enum xfs_bmap_intent_type	type,
	struct xfs_inode		*ip,
	int				whichfork,
	xfs_fileoff_t			startoff,
	xfs_fsblock_t			startblock,
	xfs_filblks_t			*blockcount,
	xfs_exntst_t			state)
{
	int				error;

	error = xfs_bmap_finish_one(tp, ip, type, whichfork, startoff,
			startblock, blockcount, state);

	/*
	 * Mark the transaction dirty, even on error.  This ensures the
	 * transaction is aborted, which:
	 *
	 * 1.) releases the BUI and frees the BUD
	 * 2.) shuts down the filesystem
	 */
	tp->t_flags |= XFS_TRANS_DIRTY;
	set_bit(XFS_LI_DIRTY, &budp->bud_item.li_flags);

	return error;
}
262
263/* Sort bmap intents by inode. */
264static int
265xfs_bmap_update_diff_items(
266 void *priv,
267 struct list_head *a,
268 struct list_head *b)
269{
270 struct xfs_bmap_intent *ba;
271 struct xfs_bmap_intent *bb;
272
273 ba = container_of(a, struct xfs_bmap_intent, bi_list);
274 bb = container_of(b, struct xfs_bmap_intent, bi_list);
275 return ba->bi_owner->i_ino - bb->bi_owner->i_ino;
276}
277
Christoph Hellwigcaeaea92019-06-28 19:29:42 -0700278/* Set the map extent flags for this mapping. */
279static void
280xfs_trans_set_bmap_flags(
281 struct xfs_map_extent *bmap,
282 enum xfs_bmap_intent_type type,
283 int whichfork,
284 xfs_exntst_t state)
285{
286 bmap->me_flags = 0;
287 switch (type) {
288 case XFS_BMAP_MAP:
289 case XFS_BMAP_UNMAP:
290 bmap->me_flags = type;
291 break;
292 default:
293 ASSERT(0);
294 }
295 if (state == XFS_EXT_UNWRITTEN)
296 bmap->me_flags |= XFS_BMAP_EXTENT_UNWRITTEN;
297 if (whichfork == XFS_ATTR_FORK)
298 bmap->me_flags |= XFS_BMAP_EXTENT_ATTR_FORK;
299}
300
301/* Log bmap updates in the intent item. */
302STATIC void
303xfs_bmap_update_log_item(
304 struct xfs_trans *tp,
Christoph Hellwigc1f09182020-04-30 12:52:20 -0700305 struct xfs_bui_log_item *buip,
306 struct xfs_bmap_intent *bmap)
Christoph Hellwigcaeaea92019-06-28 19:29:42 -0700307{
Christoph Hellwigcaeaea92019-06-28 19:29:42 -0700308 uint next_extent;
309 struct xfs_map_extent *map;
310
Christoph Hellwigcaeaea92019-06-28 19:29:42 -0700311 tp->t_flags |= XFS_TRANS_DIRTY;
312 set_bit(XFS_LI_DIRTY, &buip->bui_item.li_flags);
313
314 /*
315 * atomic_inc_return gives us the value after the increment;
316 * we want to use it as an array index so we need to subtract 1 from
317 * it.
318 */
319 next_extent = atomic_inc_return(&buip->bui_next_extent) - 1;
320 ASSERT(next_extent < buip->bui_format.bui_nextents);
321 map = &buip->bui_format.bui_extents[next_extent];
322 map->me_owner = bmap->bi_owner->i_ino;
323 map->me_startblock = bmap->bi_bmap.br_startblock;
324 map->me_startoff = bmap->bi_bmap.br_startoff;
325 map->me_len = bmap->bi_bmap.br_blockcount;
326 xfs_trans_set_bmap_flags(map, bmap->bi_type, bmap->bi_whichfork,
327 bmap->bi_bmap.br_state);
328}
329
Christoph Hellwig13a83332020-04-30 12:52:21 -0700330static struct xfs_log_item *
Christoph Hellwigc1f09182020-04-30 12:52:20 -0700331xfs_bmap_update_create_intent(
332 struct xfs_trans *tp,
333 struct list_head *items,
Christoph Hellwigd367a862020-04-30 12:52:20 -0700334 unsigned int count,
335 bool sort)
Christoph Hellwigc1f09182020-04-30 12:52:20 -0700336{
Christoph Hellwigd367a862020-04-30 12:52:20 -0700337 struct xfs_mount *mp = tp->t_mountp;
338 struct xfs_bui_log_item *buip = xfs_bui_init(mp);
Christoph Hellwigc1f09182020-04-30 12:52:20 -0700339 struct xfs_bmap_intent *bmap;
340
341 ASSERT(count == XFS_BUI_MAX_FAST_EXTENTS);
342
343 xfs_trans_add_item(tp, &buip->bui_item);
Christoph Hellwigd367a862020-04-30 12:52:20 -0700344 if (sort)
345 list_sort(mp, items, xfs_bmap_update_diff_items);
Christoph Hellwigc1f09182020-04-30 12:52:20 -0700346 list_for_each_entry(bmap, items, bi_list)
347 xfs_bmap_update_log_item(tp, buip, bmap);
Christoph Hellwig13a83332020-04-30 12:52:21 -0700348 return &buip->bui_item;
Christoph Hellwigc1f09182020-04-30 12:52:20 -0700349}
350
Christoph Hellwigcaeaea92019-06-28 19:29:42 -0700351/* Get an BUD so we can process all the deferred rmap updates. */
Christoph Hellwigf09d1672020-04-30 12:52:22 -0700352static struct xfs_log_item *
Christoph Hellwigcaeaea92019-06-28 19:29:42 -0700353xfs_bmap_update_create_done(
354 struct xfs_trans *tp,
Christoph Hellwig13a83332020-04-30 12:52:21 -0700355 struct xfs_log_item *intent,
Christoph Hellwigcaeaea92019-06-28 19:29:42 -0700356 unsigned int count)
357{
Christoph Hellwigf09d1672020-04-30 12:52:22 -0700358 return &xfs_trans_get_bud(tp, BUI_ITEM(intent))->bud_item;
Christoph Hellwigcaeaea92019-06-28 19:29:42 -0700359}
360
361/* Process a deferred rmap update. */
362STATIC int
363xfs_bmap_update_finish_item(
364 struct xfs_trans *tp,
Christoph Hellwigf09d1672020-04-30 12:52:22 -0700365 struct xfs_log_item *done,
Christoph Hellwigcaeaea92019-06-28 19:29:42 -0700366 struct list_head *item,
Christoph Hellwig3ec1b262020-04-30 12:52:22 -0700367 struct xfs_btree_cur **state)
Christoph Hellwigcaeaea92019-06-28 19:29:42 -0700368{
369 struct xfs_bmap_intent *bmap;
370 xfs_filblks_t count;
371 int error;
372
373 bmap = container_of(item, struct xfs_bmap_intent, bi_list);
374 count = bmap->bi_bmap.br_blockcount;
Christoph Hellwigf09d1672020-04-30 12:52:22 -0700375 error = xfs_trans_log_finish_bmap_update(tp, BUD_ITEM(done),
Christoph Hellwigcaeaea92019-06-28 19:29:42 -0700376 bmap->bi_type,
377 bmap->bi_owner, bmap->bi_whichfork,
378 bmap->bi_bmap.br_startoff,
379 bmap->bi_bmap.br_startblock,
380 &count,
381 bmap->bi_bmap.br_state);
382 if (!error && count > 0) {
383 ASSERT(bmap->bi_type == XFS_BMAP_UNMAP);
384 bmap->bi_bmap.br_blockcount = count;
385 return -EAGAIN;
386 }
387 kmem_free(bmap);
388 return error;
389}
390
/* Abort all pending BUIs. */
STATIC void
xfs_bmap_update_abort_intent(
	struct xfs_log_item		*intent)
{
	/* Drop the deferred-ops reference on the intent item. */
	xfs_bui_release(BUI_ITEM(intent));
}
398
399/* Cancel a deferred rmap update. */
400STATIC void
401xfs_bmap_update_cancel_item(
402 struct list_head *item)
403{
404 struct xfs_bmap_intent *bmap;
405
406 bmap = container_of(item, struct xfs_bmap_intent, bi_list);
407 kmem_free(bmap);
408}
409
/* Deferred-operation dispatch table for bmap update intents. */
const struct xfs_defer_op_type xfs_bmap_update_defer_type = {
	.max_items	= XFS_BUI_MAX_FAST_EXTENTS,
	.create_intent	= xfs_bmap_update_create_intent,
	.abort_intent	= xfs_bmap_update_abort_intent,
	.create_done	= xfs_bmap_update_create_done,
	.finish_item	= xfs_bmap_update_finish_item,
	.cancel_item	= xfs_bmap_update_cancel_item,
};
418
/*
 * Process a bmap update intent item that was recovered from the log.
 * We need to update some inode's bmbt.
 *
 * Validates the single logged mapping, replays it in a fresh transaction,
 * and requeues any unfinished unmap work as a new deferred op.  On any
 * validation failure the BUI is released (pulling it from the AIL) and
 * -EFSCORRUPTED is returned.
 */
STATIC int
xfs_bui_recover(
	struct xfs_trans		*parent_tp,
	struct xfs_bui_log_item		*buip)
{
	int				error = 0;
	unsigned int			bui_type;
	struct xfs_map_extent		*bmap;
	xfs_fsblock_t			startblock_fsb;
	xfs_fsblock_t			inode_fsb;
	xfs_filblks_t			count;
	bool				op_ok;
	struct xfs_bud_log_item		*budp;
	enum xfs_bmap_intent_type	type;
	int				whichfork;
	xfs_exntst_t			state;
	struct xfs_trans		*tp;
	struct xfs_inode		*ip = NULL;
	struct xfs_bmbt_irec		irec;
	struct xfs_mount		*mp = parent_tp->t_mountp;

	ASSERT(!test_bit(XFS_BUI_RECOVERED, &buip->bui_flags));

	/* Only one mapping operation per BUI... */
	if (buip->bui_format.bui_nextents != XFS_BUI_MAX_FAST_EXTENTS) {
		set_bit(XFS_BUI_RECOVERED, &buip->bui_flags);
		xfs_bui_release(buip);
		return -EFSCORRUPTED;
	}

	/*
	 * First check the validity of the extent described by the
	 * BUI.  If anything is bad, then toss the BUI.
	 */
	bmap = &buip->bui_format.bui_extents[0];
	startblock_fsb = XFS_BB_TO_FSB(mp,
			   XFS_FSB_TO_DADDR(mp, bmap->me_startblock));
	inode_fsb = XFS_BB_TO_FSB(mp, XFS_FSB_TO_DADDR(mp,
			XFS_INO_TO_FSB(mp, bmap->me_owner)));
	switch (bmap->me_flags & XFS_BMAP_EXTENT_TYPE_MASK) {
	case XFS_BMAP_MAP:
	case XFS_BMAP_UNMAP:
		op_ok = true;
		break;
	default:
		op_ok = false;
		break;
	}
	if (!op_ok || startblock_fsb == 0 ||
	    bmap->me_len == 0 ||
	    inode_fsb == 0 ||
	    startblock_fsb >= mp->m_sb.sb_dblocks ||
	    bmap->me_len >= mp->m_sb.sb_agblocks ||
	    inode_fsb >= mp->m_sb.sb_dblocks ||
	    (bmap->me_flags & ~XFS_BMAP_EXTENT_FLAGS)) {
		/*
		 * This will pull the BUI from the AIL and
		 * free the memory associated with it.
		 */
		set_bit(XFS_BUI_RECOVERED, &buip->bui_flags);
		xfs_bui_release(buip);
		return -EFSCORRUPTED;
	}

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate,
			XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK), 0, 0, &tp);
	if (error)
		return error;
	/*
	 * Recovery stashes all deferred ops during intent processing and
	 * finishes them on completion. Transfer current dfops state to this
	 * transaction and transfer the result back before we return.
	 */
	xfs_defer_move(tp, parent_tp);
	budp = xfs_trans_get_bud(tp, buip);

	/* Grab the inode. */
	error = xfs_iget(mp, tp, bmap->me_owner, 0, XFS_ILOCK_EXCL, &ip);
	if (error)
		goto err_inode;

	/*
	 * Unlinked inodes touched only by recovery are flagged so that
	 * inactivation is handled appropriately later.
	 */
	if (VFS_I(ip)->i_nlink == 0)
		xfs_iflags_set(ip, XFS_IRECOVERY);

	/* Process deferred bmap item. */
	state = (bmap->me_flags & XFS_BMAP_EXTENT_UNWRITTEN) ?
			XFS_EXT_UNWRITTEN : XFS_EXT_NORM;
	whichfork = (bmap->me_flags & XFS_BMAP_EXTENT_ATTR_FORK) ?
			XFS_ATTR_FORK : XFS_DATA_FORK;
	bui_type = bmap->me_flags & XFS_BMAP_EXTENT_TYPE_MASK;
	switch (bui_type) {
	case XFS_BMAP_MAP:
	case XFS_BMAP_UNMAP:
		type = bui_type;
		break;
	default:
		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp);
		error = -EFSCORRUPTED;
		goto err_inode;
	}
	xfs_trans_ijoin(tp, ip, 0);

	count = bmap->me_len;
	error = xfs_trans_log_finish_bmap_update(tp, budp, type, ip, whichfork,
			bmap->me_startoff, bmap->me_startblock, &count, state);
	if (error)
		goto err_inode;

	/* Requeue any leftover unmap work as a fresh deferred op. */
	if (count > 0) {
		ASSERT(type == XFS_BMAP_UNMAP);
		irec.br_startblock = bmap->me_startblock;
		irec.br_blockcount = count;
		irec.br_startoff = bmap->me_startoff;
		irec.br_state = state;
		xfs_bmap_unmap_extent(tp, ip, &irec);
	}

	set_bit(XFS_BUI_RECOVERED, &buip->bui_flags);
	/* Hand the stashed dfops back to the parent before committing. */
	xfs_defer_move(parent_tp, tp);
	error = xfs_trans_commit(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	xfs_irele(ip);

	return error;

err_inode:
	xfs_defer_move(parent_tp, tp);
	xfs_trans_cancel(tp);
	if (ip) {
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		xfs_irele(ip);
	}
	return error;
}
Darrick J. Wong86ffa472020-05-01 16:00:45 -0700557
Darrick J. Wong9329ba82020-05-01 16:00:52 -0700558/* Recover the BUI if necessary. */
559STATIC int
560xfs_bui_item_recover(
561 struct xfs_log_item *lip,
562 struct xfs_trans *tp)
563{
564 struct xfs_ail *ailp = lip->li_ailp;
565 struct xfs_bui_log_item *buip = BUI_ITEM(lip);
566 int error;
567
568 /*
569 * Skip BUIs that we've already processed.
570 */
571 if (test_bit(XFS_BUI_RECOVERED, &buip->bui_flags))
572 return 0;
573
574 spin_unlock(&ailp->ail_lock);
575 error = xfs_bui_recover(tp, buip);
576 spin_lock(&ailp->ail_lock);
577
578 return error;
579}
580
/* Match a BUI in the AIL against the intent id logged in a BUD. */
STATIC bool
xfs_bui_item_match(
	struct xfs_log_item	*lip,
	uint64_t		intent_id)
{
	return BUI_ITEM(lip)->bui_format.bui_id == intent_id;
}
588
/*
 * Log item operations for the BUI.  Unlike the BUD, the BUI participates
 * in unpin handling and can be replayed from the AIL during log recovery
 * (iop_recover / iop_match).
 */
static const struct xfs_item_ops xfs_bui_item_ops = {
	.iop_size	= xfs_bui_item_size,
	.iop_format	= xfs_bui_item_format,
	.iop_unpin	= xfs_bui_item_unpin,
	.iop_release	= xfs_bui_item_release,
	.iop_recover	= xfs_bui_item_recover,
	.iop_match	= xfs_bui_item_match,
};
597
Darrick J. Wong3c6ba3c2020-05-01 16:00:50 -0700598/*
599 * Copy an BUI format buffer from the given buf, and into the destination
600 * BUI format structure. The BUI/BUD items were designed not to need any
601 * special alignment handling.
602 */
603static int
604xfs_bui_copy_format(
605 struct xfs_log_iovec *buf,
606 struct xfs_bui_log_format *dst_bui_fmt)
607{
608 struct xfs_bui_log_format *src_bui_fmt;
609 uint len;
610
611 src_bui_fmt = buf->i_addr;
612 len = xfs_bui_log_format_sizeof(src_bui_fmt->bui_nextents);
613
614 if (buf->i_len == len) {
615 memcpy(dst_bui_fmt, src_bui_fmt, len);
616 return 0;
617 }
618 XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, NULL);
619 return -EFSCORRUPTED;
620}
621
622/*
623 * This routine is called to create an in-core extent bmap update
624 * item from the bui format structure which was logged on disk.
625 * It allocates an in-core bui, copies the extents from the format
626 * structure into it, and adds the bui to the AIL with the given
627 * LSN.
628 */
629STATIC int
630xlog_recover_bui_commit_pass2(
631 struct xlog *log,
632 struct list_head *buffer_list,
633 struct xlog_recover_item *item,
634 xfs_lsn_t lsn)
635{
636 int error;
637 struct xfs_mount *mp = log->l_mp;
638 struct xfs_bui_log_item *buip;
639 struct xfs_bui_log_format *bui_formatp;
640
641 bui_formatp = item->ri_buf[0].i_addr;
642
643 if (bui_formatp->bui_nextents != XFS_BUI_MAX_FAST_EXTENTS) {
644 XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, log->l_mp);
645 return -EFSCORRUPTED;
646 }
647 buip = xfs_bui_init(mp);
648 error = xfs_bui_copy_format(&item->ri_buf[0], &buip->bui_format);
649 if (error) {
650 xfs_bui_item_free(buip);
651 return error;
652 }
653 atomic_set(&buip->bui_next_extent, bui_formatp->bui_nextents);
654
655 spin_lock(&log->l_ailp->ail_lock);
656 /*
657 * The RUI has two references. One for the RUD and one for RUI to ensure
658 * it makes it into the AIL. Insert the RUI into the AIL directly and
659 * drop the RUI reference. Note that xfs_trans_ail_update() drops the
660 * AIL lock.
661 */
662 xfs_trans_ail_update(log->l_ailp, &buip->bui_item, lsn);
663 xfs_bui_release(buip);
664 return 0;
665}
666
/* Log-recovery dispatch table for BUI items found in pass 2. */
const struct xlog_recover_item_ops xlog_bui_item_ops = {
	.item_type		= XFS_LI_BUI,
	.commit_pass2		= xlog_recover_bui_commit_pass2,
};
671
Darrick J. Wong3c6ba3c2020-05-01 16:00:50 -0700672/*
673 * This routine is called when an BUD format structure is found in a committed
674 * transaction in the log. Its purpose is to cancel the corresponding BUI if it
675 * was still in the log. To do this it searches the AIL for the BUI with an id
676 * equal to that in the BUD format structure. If we find it we drop the BUD
677 * reference, which removes the BUI from the AIL and frees it.
678 */
679STATIC int
680xlog_recover_bud_commit_pass2(
681 struct xlog *log,
682 struct list_head *buffer_list,
683 struct xlog_recover_item *item,
684 xfs_lsn_t lsn)
685{
686 struct xfs_bud_log_format *bud_formatp;
Darrick J. Wong3c6ba3c2020-05-01 16:00:50 -0700687
688 bud_formatp = item->ri_buf[0].i_addr;
689 if (item->ri_buf[0].i_len != sizeof(struct xfs_bud_log_format)) {
690 XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, log->l_mp);
691 return -EFSCORRUPTED;
692 }
Darrick J. Wong3c6ba3c2020-05-01 16:00:50 -0700693
Darrick J. Wong154c7332020-05-01 16:00:54 -0700694 xlog_recover_release_intent(log, XFS_LI_BUI, bud_formatp->bud_bui_id);
Darrick J. Wong3c6ba3c2020-05-01 16:00:50 -0700695 return 0;
696}
697
/* Log-recovery dispatch table for BUD items found in pass 2. */
const struct xlog_recover_item_ops xlog_bud_item_ops = {
	.item_type		= XFS_LI_BUD,
	.commit_pass2		= xlog_recover_bud_commit_pass2,
};