// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2016 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_shared.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_buf_item.h"
#include "xfs_rmap_item.h"
#include "xfs_log.h"
#include "xfs_rmap.h"

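/*
 * This file implements the rmap update intent (RUI) and rmap update done
 * (RUD) log items.  An RUI records the intent to update the reverse
 * mapping btree; once the update has been committed, the matching RUD is
 * logged so that log recovery knows the intent is no longer pending.
 */
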
kmem_zone_t	*xfs_rui_zone;
kmem_zone_t	*xfs_rud_zone;

static inline struct xfs_rui_log_item *RUI_ITEM(struct xfs_log_item *lip)
{
	return container_of(lip, struct xfs_rui_log_item, rui_item);
}

void
xfs_rui_item_free(
	struct xfs_rui_log_item	*ruip)
{
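	/*
	 * Oversized items were allocated from the heap in xfs_rui_init(),
	 * so they must be returned there; everything else goes back to
	 * the zone.
	 */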
	if (ruip->rui_format.rui_nextents > XFS_RUI_MAX_FAST_EXTENTS)
		kmem_free(ruip);
	else
		kmem_zone_free(xfs_rui_zone, ruip);
}

/*
 * Freeing the RUI requires that we remove it from the AIL if it has already
 * been placed there. However, the RUI may not yet have been placed in the AIL
 * when called by xfs_rui_release() from RUD processing due to the ordering of
 * committed vs. unpin operations in bulk insert operations. Hence we use the
 * reference count to ensure only the last caller frees the RUI.
 */
void
xfs_rui_release(
	struct xfs_rui_log_item	*ruip)
{
	ASSERT(atomic_read(&ruip->rui_refcount) > 0);
	if (atomic_dec_and_test(&ruip->rui_refcount)) {
		xfs_trans_ail_remove(&ruip->rui_item, SHUTDOWN_LOG_IO_ERROR);
		xfs_rui_item_free(ruip);
	}
}

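/*
 * Return the number of log iovecs and the amount of log space needed to
 * log the given rui item.
 */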
STATIC void
xfs_rui_item_size(
	struct xfs_log_item	*lip,
	int			*nvecs,
	int			*nbytes)
{
	struct xfs_rui_log_item	*ruip = RUI_ITEM(lip);

	*nvecs += 1;
	*nbytes += xfs_rui_log_format_sizeof(ruip->rui_format.rui_nextents);
}

/*
 * This is called to fill in the vector of log iovecs for the
 * given rui log item. We use only 1 iovec, and we point that
 * at the rui_log_format structure embedded in the rui item.
 * It is at this point that we assert that all of the extent
 * slots in the rui item have been filled.
 */
STATIC void
xfs_rui_item_format(
	struct xfs_log_item	*lip,
	struct xfs_log_vec	*lv)
{
	struct xfs_rui_log_item	*ruip = RUI_ITEM(lip);
	struct xfs_log_iovec	*vecp = NULL;

	ASSERT(atomic_read(&ruip->rui_next_extent) ==
			ruip->rui_format.rui_nextents);

	ruip->rui_format.rui_type = XFS_LI_RUI;
	ruip->rui_format.rui_size = 1;

	xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_RUI_FORMAT, &ruip->rui_format,
			xfs_rui_log_format_sizeof(ruip->rui_format.rui_nextents));
}

/*
 * The unpin operation is the last place an RUI is manipulated in the log. It
 * is either inserted in the AIL or aborted in the event of a log I/O error.
 * In either case, the RUI transaction has been successfully committed to make
 * it this far. Therefore, we expect whoever committed the RUI to either
 * construct and commit the RUD or drop the RUD's reference in the event of
 * error. Simply drop the log's RUI reference now that the log is done with it.
 */
STATIC void
xfs_rui_item_unpin(
	struct xfs_log_item	*lip,
	int			remove)
{
	struct xfs_rui_log_item	*ruip = RUI_ITEM(lip);

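	/*
	 * The remove flag is not needed here: if the item must come off
	 * the AIL, xfs_rui_release() takes care of that when the last
	 * reference is dropped.
	 */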
	xfs_rui_release(ruip);
}

/*
 * If the transaction has been cancelled, the RUI has already been either
 * committed or aborted, and an RUD isn't going to be constructed; free the
 * RUI here directly.
 */
STATIC void
xfs_rui_item_release(
	struct xfs_log_item	*lip)
{
	xfs_rui_release(RUI_ITEM(lip));
}

static const struct xfs_item_ops xfs_rui_item_ops = {
	.iop_size	= xfs_rui_item_size,
	.iop_format	= xfs_rui_item_format,
	.iop_unpin	= xfs_rui_item_unpin,
	.iop_release	= xfs_rui_item_release,
};

/*
 * Allocate and initialize an rui item with the given number of extents.
 */
struct xfs_rui_log_item *
xfs_rui_init(
	struct xfs_mount	*mp,
	uint			nextents)
{
	struct xfs_rui_log_item	*ruip;

	ASSERT(nextents > 0);
	if (nextents > XFS_RUI_MAX_FAST_EXTENTS)
		ruip = kmem_zalloc(xfs_rui_log_item_sizeof(nextents), KM_SLEEP);
	else
		ruip = kmem_zone_zalloc(xfs_rui_zone, KM_SLEEP);

	xfs_log_item_init(mp, &ruip->rui_item, XFS_LI_RUI, &xfs_rui_item_ops);
	ruip->rui_format.rui_nextents = nextents;
	ruip->rui_format.rui_id = (uintptr_t)(void *)ruip;
	atomic_set(&ruip->rui_next_extent, 0);
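	/*
	 * Two references: one is dropped by the log's unpin operation and
	 * one by xfs_rui_release() when the matching RUD is committed (or
	 * the intent is aborted).
	 */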
	atomic_set(&ruip->rui_refcount, 2);

	return ruip;
}

/*
 * Copy an RUI format buffer from the given buf into the destination RUI
 * format structure.  The RUI/RUD items were designed not to need any
 * special alignment handling.
 */
int
xfs_rui_copy_format(
	struct xfs_log_iovec		*buf,
	struct xfs_rui_log_format	*dst_rui_fmt)
{
	struct xfs_rui_log_format	*src_rui_fmt;
	uint				len;

	src_rui_fmt = buf->i_addr;
	len = xfs_rui_log_format_sizeof(src_rui_fmt->rui_nextents);

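	/*
	 * The recovered buffer must be exactly the size implied by its
	 * extent count; anything else means the log record is corrupt.
	 */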
	if (buf->i_len != len)
		return -EFSCORRUPTED;

	memcpy(dst_rui_fmt, src_rui_fmt, len);
	return 0;
}

static inline struct xfs_rud_log_item *RUD_ITEM(struct xfs_log_item *lip)
{
	return container_of(lip, struct xfs_rud_log_item, rud_item);
}

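/*
 * Return the number of log iovecs and the amount of log space needed to
 * log the given rud item; the RUD format structure has a fixed size.
 */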
STATIC void
xfs_rud_item_size(
	struct xfs_log_item	*lip,
	int			*nvecs,
	int			*nbytes)
{
	*nvecs += 1;
	*nbytes += sizeof(struct xfs_rud_log_format);
}

/*
 * This is called to fill in the vector of log iovecs for the
 * given rud log item. We use only 1 iovec, and we point that
 * at the rud_log_format structure embedded in the rud item.
 */
STATIC void
xfs_rud_item_format(
	struct xfs_log_item	*lip,
	struct xfs_log_vec	*lv)
{
	struct xfs_rud_log_item	*rudp = RUD_ITEM(lip);
	struct xfs_log_iovec	*vecp = NULL;

	rudp->rud_format.rud_type = XFS_LI_RUD;
	rudp->rud_format.rud_size = 1;

	xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_RUD_FORMAT, &rudp->rud_format,
			sizeof(struct xfs_rud_log_format));
}

/*
 * If the transaction is cancelled, the RUD is either already committed or
 * aborted; either way, drop our reference to the RUI and free the RUD.
 */
STATIC void
xfs_rud_item_release(
	struct xfs_log_item	*lip)
{
	struct xfs_rud_log_item	*rudp = RUD_ITEM(lip);

	xfs_rui_release(rudp->rud_ruip);
	kmem_zone_free(xfs_rud_zone, rudp);
}

static const struct xfs_item_ops xfs_rud_item_ops = {
	.flags		= XFS_ITEM_RELEASE_WHEN_COMMITTED,
	.iop_size	= xfs_rud_item_size,
	.iop_format	= xfs_rud_item_format,
	.iop_release	= xfs_rud_item_release,
};

static struct xfs_rud_log_item *
xfs_trans_get_rud(
	struct xfs_trans		*tp,
	struct xfs_rui_log_item		*ruip)
{
	struct xfs_rud_log_item		*rudp;

	rudp = kmem_zone_zalloc(xfs_rud_zone, KM_SLEEP);
	xfs_log_item_init(tp->t_mountp, &rudp->rud_item, XFS_LI_RUD,
			  &xfs_rud_item_ops);
	rudp->rud_ruip = ruip;
	rudp->rud_format.rud_rui_id = ruip->rui_format.rui_id;

	xfs_trans_add_item(tp, &rudp->rud_item);
	return rudp;
}

/* Set the map extent flags for this reverse mapping. */
static void
xfs_trans_set_rmap_flags(
	struct xfs_map_extent		*rmap,
	enum xfs_rmap_intent_type	type,
	int				whichfork,
	xfs_exntst_t			state)
{
	rmap->me_flags = 0;
	if (state == XFS_EXT_UNWRITTEN)
		rmap->me_flags |= XFS_RMAP_EXTENT_UNWRITTEN;
	if (whichfork == XFS_ATTR_FORK)
		rmap->me_flags |= XFS_RMAP_EXTENT_ATTR_FORK;
	switch (type) {
	case XFS_RMAP_MAP:
		rmap->me_flags |= XFS_RMAP_EXTENT_MAP;
		break;
	case XFS_RMAP_MAP_SHARED:
		rmap->me_flags |= XFS_RMAP_EXTENT_MAP_SHARED;
		break;
	case XFS_RMAP_UNMAP:
		rmap->me_flags |= XFS_RMAP_EXTENT_UNMAP;
		break;
	case XFS_RMAP_UNMAP_SHARED:
		rmap->me_flags |= XFS_RMAP_EXTENT_UNMAP_SHARED;
		break;
	case XFS_RMAP_CONVERT:
		rmap->me_flags |= XFS_RMAP_EXTENT_CONVERT;
		break;
	case XFS_RMAP_CONVERT_SHARED:
		rmap->me_flags |= XFS_RMAP_EXTENT_CONVERT_SHARED;
		break;
	case XFS_RMAP_ALLOC:
		rmap->me_flags |= XFS_RMAP_EXTENT_ALLOC;
		break;
	case XFS_RMAP_FREE:
		rmap->me_flags |= XFS_RMAP_EXTENT_FREE;
		break;
	default:
		ASSERT(0);
	}
}

/*
 * Finish an rmap update and log it to the RUD. Note that the transaction is
 * marked dirty regardless of whether the rmap update succeeds or fails to
 * support the RUI/RUD lifecycle rules.
 */
static int
xfs_trans_log_finish_rmap_update(
	struct xfs_trans		*tp,
	struct xfs_rud_log_item		*rudp,
	enum xfs_rmap_intent_type	type,
	uint64_t			owner,
	int				whichfork,
	xfs_fileoff_t			startoff,
	xfs_fsblock_t			startblock,
	xfs_filblks_t			blockcount,
	xfs_exntst_t			state,
	struct xfs_btree_cur		**pcur)
{
	int				error;

	error = xfs_rmap_finish_one(tp, type, owner, whichfork, startoff,
			startblock, blockcount, state, pcur);

	/*
	 * Mark the transaction dirty, even on error. This ensures the
	 * transaction is aborted, which:
	 *
	 * 1.) releases the RUI and frees the RUD
	 * 2.) shuts down the filesystem
	 */
	tp->t_flags |= XFS_TRANS_DIRTY;
	set_bit(XFS_LI_DIRTY, &rudp->rud_item.li_flags);

	return error;
}

/*
 * Sort rmap intents by AG so that we update the rmap btrees in ascending
 * AG order, which keeps the per-AG locking order consistent.
 */
static int
xfs_rmap_update_diff_items(
	void				*priv,
	struct list_head		*a,
	struct list_head		*b)
{
	struct xfs_mount		*mp = priv;
	struct xfs_rmap_intent		*ra;
	struct xfs_rmap_intent		*rb;

	ra = container_of(a, struct xfs_rmap_intent, ri_list);
	rb = container_of(b, struct xfs_rmap_intent, ri_list);
	return XFS_FSB_TO_AGNO(mp, ra->ri_bmap.br_startblock) -
		XFS_FSB_TO_AGNO(mp, rb->ri_bmap.br_startblock);
}

/* Get an RUI. */
STATIC void *
xfs_rmap_update_create_intent(
	struct xfs_trans		*tp,
	unsigned int			count)
{
	struct xfs_rui_log_item		*ruip;

	ASSERT(tp != NULL);
	ASSERT(count > 0);

	ruip = xfs_rui_init(tp->t_mountp, count);
	ASSERT(ruip != NULL);

	/*
	 * Attach the new intent item to the transaction.
	 */
	xfs_trans_add_item(tp, &ruip->rui_item);
	return ruip;
}

/* Log rmap updates in the intent item. */
STATIC void
xfs_rmap_update_log_item(
	struct xfs_trans		*tp,
	void				*intent,
	struct list_head		*item)
{
	struct xfs_rui_log_item		*ruip = intent;
	struct xfs_rmap_intent		*rmap;
	uint				next_extent;
	struct xfs_map_extent		*map;

	rmap = container_of(item, struct xfs_rmap_intent, ri_list);

	tp->t_flags |= XFS_TRANS_DIRTY;
	set_bit(XFS_LI_DIRTY, &ruip->rui_item.li_flags);

	/*
	 * atomic_inc_return gives us the value after the increment;
	 * we want to use it as an array index so we need to subtract 1 from
	 * it.
	 */
	next_extent = atomic_inc_return(&ruip->rui_next_extent) - 1;
	ASSERT(next_extent < ruip->rui_format.rui_nextents);
	map = &ruip->rui_format.rui_extents[next_extent];
	map->me_owner = rmap->ri_owner;
	map->me_startblock = rmap->ri_bmap.br_startblock;
	map->me_startoff = rmap->ri_bmap.br_startoff;
	map->me_len = rmap->ri_bmap.br_blockcount;
	xfs_trans_set_rmap_flags(map, rmap->ri_type, rmap->ri_whichfork,
			rmap->ri_bmap.br_state);
}

/* Get an RUD so we can process all the deferred rmap updates. */
STATIC void *
xfs_rmap_update_create_done(
	struct xfs_trans		*tp,
	void				*intent,
	unsigned int			count)
{
	return xfs_trans_get_rud(tp, intent);
}

/* Process a deferred rmap update. */
STATIC int
xfs_rmap_update_finish_item(
	struct xfs_trans		*tp,
	struct list_head		*item,
	void				*done_item,
	void				**state)
{
	struct xfs_rmap_intent		*rmap;
	int				error;

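	/*
	 * The opaque state carries the rmapbt cursor so that successive
	 * updates can reuse a single btree cursor; it is torn down in
	 * xfs_rmap_update_finish_cleanup() below.
	 */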
	rmap = container_of(item, struct xfs_rmap_intent, ri_list);
	error = xfs_trans_log_finish_rmap_update(tp, done_item,
			rmap->ri_type,
			rmap->ri_owner, rmap->ri_whichfork,
			rmap->ri_bmap.br_startoff,
			rmap->ri_bmap.br_startblock,
			rmap->ri_bmap.br_blockcount,
			rmap->ri_bmap.br_state,
			(struct xfs_btree_cur **)state);
	kmem_free(rmap);
	return error;
}

/* Clean up after processing deferred rmaps. */
STATIC void
xfs_rmap_update_finish_cleanup(
	struct xfs_trans	*tp,
	void			*state,
	int			error)
{
	struct xfs_btree_cur	*rcur = state;

	xfs_rmap_finish_one_cleanup(tp, rcur, error);
}

/* Abort all pending RUIs. */
STATIC void
xfs_rmap_update_abort_intent(
	void				*intent)
{
	xfs_rui_release(intent);
}

/* Cancel a deferred rmap update. */
STATIC void
xfs_rmap_update_cancel_item(
	struct list_head		*item)
{
	struct xfs_rmap_intent		*rmap;

	rmap = container_of(item, struct xfs_rmap_intent, ri_list);
	kmem_free(rmap);
}

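/* Hook the rmap update intents into the deferred-operations machinery. */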
const struct xfs_defer_op_type xfs_rmap_update_defer_type = {
	.max_items	= XFS_RUI_MAX_FAST_EXTENTS,
	.diff_items	= xfs_rmap_update_diff_items,
	.create_intent	= xfs_rmap_update_create_intent,
	.abort_intent	= xfs_rmap_update_abort_intent,
	.log_item	= xfs_rmap_update_log_item,
	.create_done	= xfs_rmap_update_create_done,
	.finish_item	= xfs_rmap_update_finish_item,
	.finish_cleanup = xfs_rmap_update_finish_cleanup,
	.cancel_item	= xfs_rmap_update_cancel_item,
};

/*
 * Process an rmap update intent item that was recovered from the log.
 * We need to update the rmapbt.
 */
int
xfs_rui_recover(
	struct xfs_mount		*mp,
	struct xfs_rui_log_item		*ruip)
{
	int				i;
	int				error = 0;
	struct xfs_map_extent		*rmap;
	xfs_fsblock_t			startblock_fsb;
	bool				op_ok;
	struct xfs_rud_log_item		*rudp;
	enum xfs_rmap_intent_type	type;
	int				whichfork;
	xfs_exntst_t			state;
	struct xfs_trans		*tp;
	struct xfs_btree_cur		*rcur = NULL;

	ASSERT(!test_bit(XFS_RUI_RECOVERED, &ruip->rui_flags));

	/*
	 * First check the validity of the extents described by the
	 * RUI. If any are bad, then assume that all are bad and
	 * just toss the RUI.
	 */
	for (i = 0; i < ruip->rui_format.rui_nextents; i++) {
		rmap = &ruip->rui_format.rui_extents[i];
		startblock_fsb = XFS_BB_TO_FSB(mp,
				   XFS_FSB_TO_DADDR(mp, rmap->me_startblock));
		switch (rmap->me_flags & XFS_RMAP_EXTENT_TYPE_MASK) {
		case XFS_RMAP_EXTENT_MAP:
		case XFS_RMAP_EXTENT_MAP_SHARED:
		case XFS_RMAP_EXTENT_UNMAP:
		case XFS_RMAP_EXTENT_UNMAP_SHARED:
		case XFS_RMAP_EXTENT_CONVERT:
		case XFS_RMAP_EXTENT_CONVERT_SHARED:
		case XFS_RMAP_EXTENT_ALLOC:
		case XFS_RMAP_EXTENT_FREE:
			op_ok = true;
			break;
		default:
			op_ok = false;
			break;
		}
		if (!op_ok || startblock_fsb == 0 ||
		    rmap->me_len == 0 ||
		    startblock_fsb >= mp->m_sb.sb_dblocks ||
		    rmap->me_len >= mp->m_sb.sb_agblocks ||
		    (rmap->me_flags & ~XFS_RMAP_EXTENT_FLAGS)) {
			/*
			 * This will pull the RUI from the AIL and
			 * free the memory associated with it.
			 */
			set_bit(XFS_RUI_RECOVERED, &ruip->rui_flags);
			xfs_rui_release(ruip);
			return -EIO;
		}
	}

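	/*
	 * The replayed updates can touch any part of the filesystem, so
	 * use the itruncate reservation (the largest we have) and reserve
	 * enough blocks for a full rmapbt split.
	 */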
	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate,
			mp->m_rmap_maxlevels, 0, XFS_TRANS_RESERVE, &tp);
	if (error)
		return error;
	rudp = xfs_trans_get_rud(tp, ruip);

	for (i = 0; i < ruip->rui_format.rui_nextents; i++) {
		rmap = &ruip->rui_format.rui_extents[i];
		state = (rmap->me_flags & XFS_RMAP_EXTENT_UNWRITTEN) ?
				XFS_EXT_UNWRITTEN : XFS_EXT_NORM;
		whichfork = (rmap->me_flags & XFS_RMAP_EXTENT_ATTR_FORK) ?
				XFS_ATTR_FORK : XFS_DATA_FORK;
		switch (rmap->me_flags & XFS_RMAP_EXTENT_TYPE_MASK) {
		case XFS_RMAP_EXTENT_MAP:
			type = XFS_RMAP_MAP;
			break;
		case XFS_RMAP_EXTENT_MAP_SHARED:
			type = XFS_RMAP_MAP_SHARED;
			break;
		case XFS_RMAP_EXTENT_UNMAP:
			type = XFS_RMAP_UNMAP;
			break;
		case XFS_RMAP_EXTENT_UNMAP_SHARED:
			type = XFS_RMAP_UNMAP_SHARED;
			break;
		case XFS_RMAP_EXTENT_CONVERT:
			type = XFS_RMAP_CONVERT;
			break;
		case XFS_RMAP_EXTENT_CONVERT_SHARED:
			type = XFS_RMAP_CONVERT_SHARED;
			break;
		case XFS_RMAP_EXTENT_ALLOC:
			type = XFS_RMAP_ALLOC;
			break;
		case XFS_RMAP_EXTENT_FREE:
			type = XFS_RMAP_FREE;
			break;
		default:
			error = -EFSCORRUPTED;
			goto abort_error;
		}
		error = xfs_trans_log_finish_rmap_update(tp, rudp, type,
				rmap->me_owner, whichfork,
				rmap->me_startoff, rmap->me_startblock,
				rmap->me_len, state, &rcur);
		if (error)
			goto abort_error;
	}

	xfs_rmap_finish_one_cleanup(tp, rcur, error);
	set_bit(XFS_RUI_RECOVERED, &ruip->rui_flags);
	error = xfs_trans_commit(tp);
	return error;

abort_error:
	xfs_rmap_finish_one_cleanup(tp, rcur, error);
	xfs_trans_cancel(tp);
	return error;
}