/*
 * Copyright (C) 2017 Oracle. All Rights Reserved.
 *
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_btree.h"
#include "xfs_bit.h"
#include "xfs_log_format.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_inode.h"
#include "xfs_inode_fork.h"
#include "xfs_alloc.h"
#include "xfs_rtalloc.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_bmap_btree.h"
#include "xfs_rmap.h"
#include "xfs_rmap_btree.h"
#include "xfs_refcount.h"
#include "scrub/xfs_scrub.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/btree.h"
#include "scrub/trace.h"

/* Set us up with an inode's bmap. */
int
xfs_scrub_setup_inode_bmap(
	struct xfs_scrub_context	*sc,
	struct xfs_inode		*ip)
{
	int				error;

	error = xfs_scrub_get_inode(sc, ip);
	if (error)
		goto out;

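	/* Lock out file I/O and page faults while we check the block maps. */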
	sc->ilock_flags = XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL;
	xfs_ilock(sc->ip, sc->ilock_flags);

	/*
	 * We don't want any ephemeral data fork updates sitting around
	 * while we inspect block mappings, so wait for directio to finish
	 * and flush dirty data if we have delalloc reservations.
	 */
	if (S_ISREG(VFS_I(sc->ip)->i_mode) &&
	    sc->sm->sm_type == XFS_SCRUB_TYPE_BMBTD) {
		inode_dio_wait(VFS_I(sc->ip));
		error = filemap_write_and_wait(VFS_I(sc->ip)->i_mapping);
		if (error)
			goto out;
	}

	/* Got the inode, lock it and we're ready to go. */
	error = xfs_scrub_trans_alloc(sc);
	if (error)
		goto out;
	sc->ilock_flags |= XFS_ILOCK_EXCL;
	xfs_ilock(sc->ip, XFS_ILOCK_EXCL);

out:
	/* scrub teardown will unlock and release the inode */
	return error;
}

/*
 * Inode fork block mapping (BMBT) scrubber.
 * More complex than the others because we have to scrub
 * all the extents regardless of whether or not the fork
 * is in btree format.
 */

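/*
 * Scrub state for a single fork: the scrub context, the offset just past
 * the last extent we checked (for ordering checks), whether the fork maps
 * realtime blocks, whether the data fork could share blocks (reflink), and
 * which fork we're scrubbing.
 */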
struct xfs_scrub_bmap_info {
	struct xfs_scrub_context	*sc;
	xfs_fileoff_t			lastoff;
	bool				is_rt;
	bool				is_shared;
	int				whichfork;
};

/* Look for a corresponding rmap for this irec. */
static inline bool
xfs_scrub_bmap_get_rmap(
	struct xfs_scrub_bmap_info	*info,
	struct xfs_bmbt_irec		*irec,
	xfs_agblock_t			agbno,
	uint64_t			owner,
	struct xfs_rmap_irec		*rmap)
{
	xfs_fileoff_t			offset;
	unsigned int			rflags = 0;
	int				has_rmap;
	int				error;

	if (info->whichfork == XFS_ATTR_FORK)
		rflags |= XFS_RMAP_ATTR_FORK;

	/*
	 * CoW staging extents are owned (on disk) by the refcountbt, so
	 * their rmaps do not have offsets.
	 */
	if (info->whichfork == XFS_COW_FORK)
		offset = 0;
	else
		offset = irec->br_startoff;

	/*
	 * If the caller thinks this could be a shared bmbt extent (IOWs,
	 * any data fork extent of a reflink inode) then we have to use the
	 * range rmap lookup to make sure we get the correct owner/offset.
	 */
	if (info->is_shared) {
		error = xfs_rmap_lookup_le_range(info->sc->sa.rmap_cur, agbno,
				owner, offset, rflags, rmap, &has_rmap);
		if (!xfs_scrub_should_check_xref(info->sc, &error,
				&info->sc->sa.rmap_cur))
			return false;
		goto out;
	}

	/*
	 * Otherwise, use the (faster) regular lookup.
	 */
	error = xfs_rmap_lookup_le(info->sc->sa.rmap_cur, agbno, 0, owner,
			offset, rflags, &has_rmap);
	if (!xfs_scrub_should_check_xref(info->sc, &error,
			&info->sc->sa.rmap_cur))
		return false;
	if (!has_rmap)
		goto out;

	error = xfs_rmap_get_rec(info->sc->sa.rmap_cur, rmap, &has_rmap);
	if (!xfs_scrub_should_check_xref(info->sc, &error,
			&info->sc->sa.rmap_cur))
		return false;

out:
	if (!has_rmap)
		xfs_scrub_fblock_xref_set_corrupt(info->sc, info->whichfork,
			irec->br_startoff);
	return has_rmap;
}

/* Make sure that we have rmapbt records for this extent. */
STATIC void
xfs_scrub_bmap_xref_rmap(
	struct xfs_scrub_bmap_info	*info,
	struct xfs_bmbt_irec		*irec,
	xfs_agblock_t			agbno)
{
	struct xfs_rmap_irec		rmap;
	unsigned long long		rmap_end;
	uint64_t			owner;

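	/* Skip the cross-reference if we can't or shouldn't use the rmapbt. */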
	if (!info->sc->sa.rmap_cur || xfs_scrub_skip_xref(info->sc->sm))
		return;

	if (info->whichfork == XFS_COW_FORK)
		owner = XFS_RMAP_OWN_COW;
	else
		owner = info->sc->ip->i_ino;

	/* Find the rmap record for this irec. */
	if (!xfs_scrub_bmap_get_rmap(info, irec, agbno, owner, &rmap))
		return;

	/* Check the rmap. */
	rmap_end = (unsigned long long)rmap.rm_startblock + rmap.rm_blockcount;
	if (rmap.rm_startblock > agbno ||
	    agbno + irec->br_blockcount > rmap_end)
		xfs_scrub_fblock_xref_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);

	/*
	 * Check the logical offsets if applicable. CoW staging extents
	 * don't track logical offsets since the mappings only exist in
	 * memory.
	 */
	if (info->whichfork != XFS_COW_FORK) {
		rmap_end = (unsigned long long)rmap.rm_offset +
				rmap.rm_blockcount;
		if (rmap.rm_offset > irec->br_startoff ||
		    irec->br_startoff + irec->br_blockcount > rmap_end)
			xfs_scrub_fblock_xref_set_corrupt(info->sc,
					info->whichfork, irec->br_startoff);
	}

	if (rmap.rm_owner != owner)
		xfs_scrub_fblock_xref_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);

	/*
	 * Check for discrepancies between the unwritten flag in the irec and
	 * the rmap. Note that the (in-memory) CoW fork distinguishes between
	 * unwritten and written extents, but we don't track that in the rmap
	 * records because the blocks are owned (on-disk) by the refcountbt,
	 * which doesn't track unwritten state.
	 */
	if (owner != XFS_RMAP_OWN_COW &&
	    irec->br_state == XFS_EXT_UNWRITTEN &&
	    !(rmap.rm_flags & XFS_RMAP_UNWRITTEN))
		xfs_scrub_fblock_xref_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);

	if (info->whichfork == XFS_ATTR_FORK &&
	    !(rmap.rm_flags & XFS_RMAP_ATTR_FORK))
		xfs_scrub_fblock_xref_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);
	if (rmap.rm_flags & XFS_RMAP_BMBT_BLOCK)
		xfs_scrub_fblock_xref_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);
}

/* Cross-reference a single rtdev extent record. */
STATIC void
xfs_scrub_bmap_rt_extent_xref(
	struct xfs_scrub_bmap_info	*info,
	struct xfs_inode		*ip,
	struct xfs_btree_cur		*cur,
	struct xfs_bmbt_irec		*irec)
{
	if (info->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

	xfs_scrub_xref_is_used_rt_space(info->sc, irec->br_startblock,
			irec->br_blockcount);
}

/* Cross-reference a single datadev extent record. */
STATIC void
xfs_scrub_bmap_extent_xref(
	struct xfs_scrub_bmap_info	*info,
	struct xfs_inode		*ip,
	struct xfs_btree_cur		*cur,
	struct xfs_bmbt_irec		*irec)
{
	struct xfs_mount		*mp = info->sc->mp;
	xfs_agnumber_t			agno;
	xfs_agblock_t			agbno;
	xfs_extlen_t			len;
	int				error;

	if (info->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

	agno = XFS_FSB_TO_AGNO(mp, irec->br_startblock);
	agbno = XFS_FSB_TO_AGBNO(mp, irec->br_startblock);
	len = irec->br_blockcount;

	error = xfs_scrub_ag_init(info->sc, agno, &info->sc->sa);
	if (!xfs_scrub_fblock_process_error(info->sc, info->whichfork,
			irec->br_startoff, &error))
		return;

	xfs_scrub_xref_is_used_space(info->sc, agbno, len);
	xfs_scrub_xref_is_not_inode_chunk(info->sc, agbno, len);
	xfs_scrub_bmap_xref_rmap(info, irec, agbno);
	switch (info->whichfork) {
	case XFS_DATA_FORK:
		if (xfs_is_reflink_inode(info->sc->ip))
			break;
		/* fall through */
	case XFS_ATTR_FORK:
		xfs_scrub_xref_is_not_shared(info->sc, agbno,
				irec->br_blockcount);
		break;
	case XFS_COW_FORK:
		xfs_scrub_xref_is_cow_staging(info->sc, agbno,
				irec->br_blockcount);
		break;
	}

	xfs_scrub_ag_free(info->sc, &info->sc->sa);
}

/* Scrub a single extent record. */
STATIC int
xfs_scrub_bmap_extent(
	struct xfs_inode		*ip,
	struct xfs_btree_cur		*cur,
	struct xfs_scrub_bmap_info	*info,
	struct xfs_bmbt_irec		*irec)
{
	struct xfs_mount		*mp = info->sc->mp;
	struct xfs_buf			*bp = NULL;
	xfs_filblks_t			end;
	int				error = 0;

	if (cur)
		xfs_btree_get_block(cur, 0, &bp);

	/*
	 * Check for out-of-order extents. This record could have come
	 * from the incore list, for which there is no ordering check.
	 */
	if (irec->br_startoff < info->lastoff)
		xfs_scrub_fblock_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);

	/* There should never be a "hole" extent in either extent list. */
	if (irec->br_startblock == HOLESTARTBLOCK)
		xfs_scrub_fblock_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);

	/*
	 * Check for delalloc extents. We never iterate the ones in the
	 * in-core extent scan, and we should never see these in the bmbt.
	 */
	if (isnullstartblock(irec->br_startblock))
		xfs_scrub_fblock_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);

	/* Make sure the extent points to a valid place. */
	if (irec->br_blockcount > MAXEXTLEN)
		xfs_scrub_fblock_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);
	if (irec->br_startblock + irec->br_blockcount <= irec->br_startblock)
		xfs_scrub_fblock_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);
	end = irec->br_startblock + irec->br_blockcount - 1;
	if (info->is_rt &&
	    (!xfs_verify_rtbno(mp, irec->br_startblock) ||
	     !xfs_verify_rtbno(mp, end)))
		xfs_scrub_fblock_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);
	if (!info->is_rt &&
	    (!xfs_verify_fsbno(mp, irec->br_startblock) ||
	     !xfs_verify_fsbno(mp, end) ||
	     XFS_FSB_TO_AGNO(mp, irec->br_startblock) !=
	     XFS_FSB_TO_AGNO(mp, end)))
		xfs_scrub_fblock_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);

	/* We don't allow unwritten extents on attr forks. */
	if (irec->br_state == XFS_EXT_UNWRITTEN &&
	    info->whichfork == XFS_ATTR_FORK)
		xfs_scrub_fblock_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);

	if (info->is_rt)
		xfs_scrub_bmap_rt_extent_xref(info, ip, cur, irec);
	else
		xfs_scrub_bmap_extent_xref(info, ip, cur, irec);

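	/* Remember where this extent ended for the next ordering check. */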
	info->lastoff = irec->br_startoff + irec->br_blockcount;
	return error;
}

/* Scrub a bmbt record. */
STATIC int
xfs_scrub_bmapbt_rec(
	struct xfs_scrub_btree		*bs,
	union xfs_btree_rec		*rec)
{
	struct xfs_bmbt_irec		irec;
	struct xfs_scrub_bmap_info	*info = bs->private;
	struct xfs_inode		*ip = bs->cur->bc_private.b.ip;
	struct xfs_buf			*bp = NULL;
	struct xfs_btree_block		*block;
	uint64_t			owner;
	int				i;

	/*
	 * Check the owners of the btree blocks up to the level below
	 * the root since the verifiers don't do that.
	 */
	if (xfs_sb_version_hascrc(&bs->cur->bc_mp->m_sb) &&
	    bs->cur->bc_ptrs[0] == 1) {
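		/* Walk each level below the root and check the block owner. */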
		for (i = 0; i < bs->cur->bc_nlevels - 1; i++) {
			block = xfs_btree_get_block(bs->cur, i, &bp);
			owner = be64_to_cpu(block->bb_u.l.bb_owner);
			if (owner != ip->i_ino)
				xfs_scrub_fblock_set_corrupt(bs->sc,
						info->whichfork, 0);
		}
	}

	/* Set up the in-core record and scrub it. */
	xfs_bmbt_disk_get_all(&rec->bmbt, &irec);
	return xfs_scrub_bmap_extent(ip, bs->cur, info, &irec);
}

/* Scan the btree records. */
STATIC int
xfs_scrub_bmap_btree(
	struct xfs_scrub_context	*sc,
	int				whichfork,
	struct xfs_scrub_bmap_info	*info)
{
	struct xfs_owner_info		oinfo;
	struct xfs_mount		*mp = sc->mp;
	struct xfs_inode		*ip = sc->ip;
	struct xfs_btree_cur		*cur;
	int				error;

	cur = xfs_bmbt_init_cursor(mp, sc->tp, ip, whichfork);
	xfs_rmap_ino_bmbt_owner(&oinfo, ip->i_ino, whichfork);
	error = xfs_scrub_btree(sc, cur, xfs_scrub_bmapbt_rec, &oinfo, info);
	xfs_btree_del_cursor(cur, error ? XFS_BTREE_ERROR :
			XFS_BTREE_NOERROR);
	return error;
}

struct xfs_scrub_bmap_check_rmap_info {
	struct xfs_scrub_context	*sc;
	int				whichfork;
	struct xfs_iext_cursor		icur;
};

/* Can we find bmaps that fit this rmap? */
STATIC int
xfs_scrub_bmap_check_rmap(
	struct xfs_btree_cur		*cur,
	struct xfs_rmap_irec		*rec,
	void				*priv)
{
	struct xfs_bmbt_irec		irec;
	struct xfs_scrub_bmap_check_rmap_info	*sbcri = priv;
	struct xfs_ifork		*ifp;
	struct xfs_scrub_context	*sc = sbcri->sc;
	bool				have_map;

	/* Is this even the right fork? */
	if (rec->rm_owner != sc->ip->i_ino)
		return 0;
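	/* Skip rmaps that belong to the other fork (attr vs. data). */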
	if ((sbcri->whichfork == XFS_ATTR_FORK) ^
	    !!(rec->rm_flags & XFS_RMAP_ATTR_FORK))
		return 0;
	if (rec->rm_flags & XFS_RMAP_BMBT_BLOCK)
		return 0;

	/* Now look up the bmbt record. */
	ifp = XFS_IFORK_PTR(sc->ip, sbcri->whichfork);
	if (!ifp) {
		xfs_scrub_fblock_set_corrupt(sc, sbcri->whichfork,
				rec->rm_offset);
		goto out;
	}
	have_map = xfs_iext_lookup_extent(sc->ip, ifp, rec->rm_offset,
			&sbcri->icur, &irec);
	if (!have_map)
		xfs_scrub_fblock_set_corrupt(sc, sbcri->whichfork,
				rec->rm_offset);
	/*
	 * bmap extent record lengths are constrained to 2^21 blocks in length
	 * because of space constraints in the on-disk metadata structure.
	 * However, rmap extent record lengths are constrained only by AG
	 * length, so we have to loop through the bmbt to make sure that the
	 * entire rmap is covered by bmbt records.
	 */
	while (have_map) {
		if (irec.br_startoff != rec->rm_offset)
			xfs_scrub_fblock_set_corrupt(sc, sbcri->whichfork,
					rec->rm_offset);
		if (irec.br_startblock != XFS_AGB_TO_FSB(sc->mp,
				cur->bc_private.a.agno, rec->rm_startblock))
			xfs_scrub_fblock_set_corrupt(sc, sbcri->whichfork,
					rec->rm_offset);
		if (irec.br_blockcount > rec->rm_blockcount)
			xfs_scrub_fblock_set_corrupt(sc, sbcri->whichfork,
					rec->rm_offset);
		if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
			break;
		rec->rm_startblock += irec.br_blockcount;
		rec->rm_offset += irec.br_blockcount;
		rec->rm_blockcount -= irec.br_blockcount;
		if (rec->rm_blockcount == 0)
			break;
		have_map = xfs_iext_next_extent(ifp, &sbcri->icur, &irec);
		if (!have_map)
			xfs_scrub_fblock_set_corrupt(sc, sbcri->whichfork,
					rec->rm_offset);
	}

out:
	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return XFS_BTREE_QUERY_RANGE_ABORT;
	return 0;
}

/* Make sure each rmap has a corresponding bmbt entry. */
STATIC int
xfs_scrub_bmap_check_ag_rmaps(
	struct xfs_scrub_context	*sc,
	int				whichfork,
	xfs_agnumber_t			agno)
{
	struct xfs_scrub_bmap_check_rmap_info	sbcri;
	struct xfs_btree_cur		*cur;
	struct xfs_buf			*agf;
	int				error;

	error = xfs_alloc_read_agf(sc->mp, sc->tp, agno, 0, &agf);
	if (error)
		return error;

	cur = xfs_rmapbt_init_cursor(sc->mp, sc->tp, agf, agno);
	if (!cur) {
		error = -ENOMEM;
		goto out_agf;
	}

	sbcri.sc = sc;
	sbcri.whichfork = whichfork;
	error = xfs_rmap_query_all(cur, xfs_scrub_bmap_check_rmap, &sbcri);
	if (error == XFS_BTREE_QUERY_RANGE_ABORT)
		error = 0;

	xfs_btree_del_cursor(cur, error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
out_agf:
	xfs_trans_brelse(sc->tp, agf);
	return error;
}

/* Make sure each rmap has a corresponding bmbt entry. */
STATIC int
xfs_scrub_bmap_check_rmaps(
	struct xfs_scrub_context	*sc,
	int				whichfork)
{
	loff_t				size;
	xfs_agnumber_t			agno;
	int				error;

	if (!xfs_sb_version_hasrmapbt(&sc->mp->m_sb) ||
	    whichfork == XFS_COW_FORK ||
	    (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT))
		return 0;

	/* Don't support realtime rmap checks yet. */
	if (XFS_IS_REALTIME_INODE(sc->ip) && whichfork == XFS_DATA_FORK)
		return 0;

	/*
	 * Only do this for complex maps that are in btree format, or for
	 * situations where we would seem to have a size but zero extents.
	 * The inode repair code can zap broken iforks, which means we have
	 * to flag this bmap as corrupt if there are rmaps that need to be
	 * reattached.
	 */
	switch (whichfork) {
	case XFS_DATA_FORK:
		size = i_size_read(VFS_I(sc->ip));
		break;
	case XFS_ATTR_FORK:
		size = XFS_IFORK_Q(sc->ip);
		break;
	default:
		size = 0;
		break;
	}
	if (XFS_IFORK_FORMAT(sc->ip, whichfork) != XFS_DINODE_FMT_BTREE &&
	    (size == 0 || XFS_IFORK_NEXTENTS(sc->ip, whichfork) > 0))
		return 0;

	for (agno = 0; agno < sc->mp->m_sb.sb_agcount; agno++) {
		error = xfs_scrub_bmap_check_ag_rmaps(sc, whichfork, agno);
		if (error)
			return error;
		if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
			break;
	}

	return 0;
}

/*
 * Scrub an inode fork's block mappings.
 *
 * First we scan every record in every btree block, if applicable.
 * Then we unconditionally scan the incore extent cache.
 */
STATIC int
xfs_scrub_bmap(
	struct xfs_scrub_context	*sc,
	int				whichfork)
{
	struct xfs_bmbt_irec		irec;
	struct xfs_scrub_bmap_info	info = { NULL };
	struct xfs_mount		*mp = sc->mp;
	struct xfs_inode		*ip = sc->ip;
	struct xfs_ifork		*ifp;
	xfs_fileoff_t			endoff;
	struct xfs_iext_cursor		icur;
	int				error = 0;

	ifp = XFS_IFORK_PTR(ip, whichfork);

	info.is_rt = whichfork == XFS_DATA_FORK && XFS_IS_REALTIME_INODE(ip);
	info.whichfork = whichfork;
	info.is_shared = whichfork == XFS_DATA_FORK && xfs_is_reflink_inode(ip);
	info.sc = sc;

	switch (whichfork) {
	case XFS_COW_FORK:
		/* Non-existent CoW forks are ignorable. */
		if (!ifp)
			goto out;
		/* No CoW forks on non-reflink inodes/filesystems. */
		if (!xfs_is_reflink_inode(ip)) {
			xfs_scrub_ino_set_corrupt(sc, sc->ip->i_ino);
			goto out;
		}
		break;
	case XFS_ATTR_FORK:
		if (!ifp)
			goto out_check_rmap;
		if (!xfs_sb_version_hasattr(&mp->m_sb) &&
		    !xfs_sb_version_hasattr2(&mp->m_sb))
			xfs_scrub_ino_set_corrupt(sc, sc->ip->i_ino);
		break;
	default:
		ASSERT(whichfork == XFS_DATA_FORK);
		break;
	}

	/* Check the fork values */
	switch (XFS_IFORK_FORMAT(ip, whichfork)) {
	case XFS_DINODE_FMT_UUID:
	case XFS_DINODE_FMT_DEV:
	case XFS_DINODE_FMT_LOCAL:
		/* No mappings to check. */
		goto out;
	case XFS_DINODE_FMT_EXTENTS:
		if (!(ifp->if_flags & XFS_IFEXTENTS)) {
			xfs_scrub_fblock_set_corrupt(sc, whichfork, 0);
			goto out;
		}
		break;
	case XFS_DINODE_FMT_BTREE:
		if (whichfork == XFS_COW_FORK) {
			xfs_scrub_fblock_set_corrupt(sc, whichfork, 0);
			goto out;
		}

		error = xfs_scrub_bmap_btree(sc, whichfork, &info);
		if (error)
			goto out;
		break;
	default:
		xfs_scrub_fblock_set_corrupt(sc, whichfork, 0);
		goto out;
	}

	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		goto out;

	/* Now try to scrub the in-memory extent list. */
	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
		error = xfs_iread_extents(sc->tp, ip, whichfork);
		if (!xfs_scrub_fblock_process_error(sc, whichfork, 0, &error))
			goto out;
	}

	/* Find the offset of the last extent in the mapping. */
	error = xfs_bmap_last_offset(ip, &endoff, whichfork);
	if (!xfs_scrub_fblock_process_error(sc, whichfork, 0, &error))
		goto out;

	/* Scrub extent records. */
	info.lastoff = 0;
	ifp = XFS_IFORK_PTR(ip, whichfork);
	for_each_xfs_iext(ifp, &icur, &irec) {
		if (xfs_scrub_should_terminate(sc, &error) ||
		    (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT))
			break;
		if (isnullstartblock(irec.br_startblock))
			continue;
		if (irec.br_startoff >= endoff) {
			xfs_scrub_fblock_set_corrupt(sc, whichfork,
					irec.br_startoff);
			goto out;
		}
		error = xfs_scrub_bmap_extent(ip, NULL, &info, &irec);
		if (error)
			goto out;
	}

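	/* Make sure each rmap for this fork has a corresponding bmbt mapping. */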
out_check_rmap:
	error = xfs_scrub_bmap_check_rmaps(sc, whichfork);
	if (!xfs_scrub_fblock_xref_process_error(sc, whichfork, 0, &error))
		goto out;
out:
	return error;
}

/* Scrub an inode's data fork. */
int
xfs_scrub_bmap_data(
	struct xfs_scrub_context	*sc)
{
	return xfs_scrub_bmap(sc, XFS_DATA_FORK);
}

/* Scrub an inode's attr fork. */
int
xfs_scrub_bmap_attr(
	struct xfs_scrub_context	*sc)
{
	return xfs_scrub_bmap(sc, XFS_ATTR_FORK);
}

/* Scrub an inode's CoW fork. */
int
xfs_scrub_bmap_cow(
	struct xfs_scrub_context	*sc)
{
	if (!xfs_is_reflink_inode(sc->ip))
		return -ENOENT;

	return xfs_scrub_bmap(sc, XFS_COW_FORK);
}