/*
 * Copyright (C) 2017 Oracle.  All Rights Reserved.
 *
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_btree.h"
#include "xfs_bit.h"
#include "xfs_log_format.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_inode.h"
#include "xfs_icache.h"
#include "xfs_itable.h"
#include "xfs_alloc.h"
#include "xfs_alloc_btree.h"
#include "xfs_bmap.h"
#include "xfs_bmap_btree.h"
#include "xfs_ialloc.h"
#include "xfs_ialloc_btree.h"
#include "xfs_refcount.h"
#include "xfs_refcount_btree.h"
#include "xfs_rmap.h"
#include "xfs_rmap_btree.h"
#include "xfs_log.h"
#include "xfs_trans_priv.h"
#include "scrub/xfs_scrub.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/trace.h"
#include "scrub/btree.h"

/* Common code for the metadata scrubbers. */

/*
 * Handling operational errors.
 *
 * The *_process_error() family of functions is used to process error return
 * codes from functions called as part of a scrub operation.
 *
 * If there's no error, we return true to tell the caller that it's ok
 * to move on to the next check in its list.
 *
 * For non-verifier errors (e.g. ENOMEM) we return false to tell the
 * caller that something bad happened, and we preserve *error so that
 * the caller can return the *error up the stack to userspace.
 *
 * Verifier errors (EFSBADCRC/EFSCORRUPTED) are recorded by setting
 * OFLAG_CORRUPT in sm_flags and the *error is cleared.  In other words,
 * we track verifier errors (and failed scrub checks) via OFLAG_CORRUPT,
 * not via return codes.  We return false to tell the caller that
 * something bad happened.  Since the error has been cleared, the caller
 * will (presumably) return that zero and scrubbing will move on to
 * whatever's next.
 *
 * ftrace can be used to record the precise metadata location and the
 * approximate code location of the failed operation.
 */

/* Check for operational errors. */
static bool
__xfs_scrub_process_error(
	struct xfs_scrub_context	*sc,
	xfs_agnumber_t			agno,
	xfs_agblock_t			bno,
	int				*error,
	__u32				errflag,
	void				*ret_ip)
{
	switch (*error) {
	case 0:
		return true;
	case -EDEADLOCK:
		/* Used to restart an op with deadlock avoidance. */
		trace_xfs_scrub_deadlock_retry(sc->ip, sc->sm, *error);
		break;
	case -EFSBADCRC:
	case -EFSCORRUPTED:
		/* Note the badness but don't abort. */
		sc->sm->sm_flags |= errflag;
		*error = 0;
		/* fall through */
	default:
		trace_xfs_scrub_op_error(sc, agno, bno, *error,
				ret_ip);
		break;
	}
	return false;
}

bool
xfs_scrub_process_error(
	struct xfs_scrub_context	*sc,
	xfs_agnumber_t			agno,
	xfs_agblock_t			bno,
	int				*error)
{
	return __xfs_scrub_process_error(sc, agno, bno, error,
			XFS_SCRUB_OFLAG_CORRUPT, __return_address);
}

bool
xfs_scrub_xref_process_error(
	struct xfs_scrub_context	*sc,
	xfs_agnumber_t			agno,
	xfs_agblock_t			bno,
	int				*error)
{
	return __xfs_scrub_process_error(sc, agno, bno, error,
			XFS_SCRUB_OFLAG_XFAIL, __return_address);
}

/* Check for operational errors for a file offset. */
static bool
__xfs_scrub_fblock_process_error(
	struct xfs_scrub_context	*sc,
	int				whichfork,
	xfs_fileoff_t			offset,
	int				*error,
	__u32				errflag,
	void				*ret_ip)
{
	switch (*error) {
	case 0:
		return true;
	case -EDEADLOCK:
		/* Used to restart an op with deadlock avoidance. */
		trace_xfs_scrub_deadlock_retry(sc->ip, sc->sm, *error);
		break;
	case -EFSBADCRC:
	case -EFSCORRUPTED:
		/* Note the badness but don't abort. */
		sc->sm->sm_flags |= errflag;
		*error = 0;
		/* fall through */
	default:
		trace_xfs_scrub_file_op_error(sc, whichfork, offset, *error,
				ret_ip);
		break;
	}
	return false;
}

bool
xfs_scrub_fblock_process_error(
	struct xfs_scrub_context	*sc,
	int				whichfork,
	xfs_fileoff_t			offset,
	int				*error)
{
	return __xfs_scrub_fblock_process_error(sc, whichfork, offset, error,
			XFS_SCRUB_OFLAG_CORRUPT, __return_address);
}

bool
xfs_scrub_fblock_xref_process_error(
	struct xfs_scrub_context	*sc,
	int				whichfork,
	xfs_fileoff_t			offset,
	int				*error)
{
	return __xfs_scrub_fblock_process_error(sc, whichfork, offset, error,
			XFS_SCRUB_OFLAG_XFAIL, __return_address);
}

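/*
 * Usage sketch (hypothetical fragment; no such caller exists in this
 * file): a scrubber feeds each nonzero return value through one of
 * these helpers and bails out when told to.
 *
 *	error = xfs_ialloc_read_agi(mp, sc->tp, agno, &agi_bp);
 *	if (!xfs_scrub_process_error(sc, agno, 0, &error))
 *		return error;
 *
 * If we get here, error == 0: either the read succeeded, or a verifier
 * failure was recorded in sm_flags and scrubbing moves on.
 */
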
/*
 * Handling scrub corruption/optimization/warning checks.
 *
 * The *_set_{corrupt,preen,warning}() family of functions is used to
 * record the presence of metadata that is incorrect (corrupt), could be
 * optimized somehow (preen), or should be flagged for administrative
 * review but is not incorrect (warn).
 *
 * ftrace can be used to record the precise metadata location and
 * approximate code location of the failed check.
 */

/* Record a block which could be optimized. */
void
xfs_scrub_block_set_preen(
	struct xfs_scrub_context	*sc,
	struct xfs_buf			*bp)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_PREEN;
	trace_xfs_scrub_block_preen(sc, bp->b_bn, __return_address);
}

/* Record an inode which could be optimized. */
void
xfs_scrub_ino_set_preen(
	struct xfs_scrub_context	*sc,
	xfs_ino_t			ino)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_PREEN;
	trace_xfs_scrub_ino_preen(sc, ino, __return_address);
}

/* Record a corrupt block. */
void
xfs_scrub_block_set_corrupt(
	struct xfs_scrub_context	*sc,
	struct xfs_buf			*bp)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT;
	trace_xfs_scrub_block_error(sc, bp->b_bn, __return_address);
}

/* Record a corruption while cross-referencing. */
void
xfs_scrub_block_xref_set_corrupt(
	struct xfs_scrub_context	*sc,
	struct xfs_buf			*bp)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_XCORRUPT;
	trace_xfs_scrub_block_error(sc, bp->b_bn, __return_address);
}

/* Record a corrupt inode. */
void
xfs_scrub_ino_set_corrupt(
	struct xfs_scrub_context	*sc,
	xfs_ino_t			ino)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT;
	trace_xfs_scrub_ino_error(sc, ino, __return_address);
}

/* Record a corruption while cross-referencing with an inode. */
void
xfs_scrub_ino_xref_set_corrupt(
	struct xfs_scrub_context	*sc,
	xfs_ino_t			ino)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_XCORRUPT;
	trace_xfs_scrub_ino_error(sc, ino, __return_address);
}

/* Record corruption in a block indexed by a file fork. */
void
xfs_scrub_fblock_set_corrupt(
	struct xfs_scrub_context	*sc,
	int				whichfork,
	xfs_fileoff_t			offset)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT;
	trace_xfs_scrub_fblock_error(sc, whichfork, offset, __return_address);
}

/* Record a corruption while cross-referencing a fork block. */
void
xfs_scrub_fblock_xref_set_corrupt(
	struct xfs_scrub_context	*sc,
	int				whichfork,
	xfs_fileoff_t			offset)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_XCORRUPT;
	trace_xfs_scrub_fblock_error(sc, whichfork, offset, __return_address);
}

/*
 * Warn about inodes that need administrative review but are not
 * incorrect.
 */
void
xfs_scrub_ino_set_warning(
	struct xfs_scrub_context	*sc,
	xfs_ino_t			ino)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_WARNING;
	trace_xfs_scrub_ino_warning(sc, ino, __return_address);
}

/* Warn about a block indexed by a file fork that needs review. */
void
xfs_scrub_fblock_set_warning(
	struct xfs_scrub_context	*sc,
	int				whichfork,
	xfs_fileoff_t			offset)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_WARNING;
	trace_xfs_scrub_fblock_warning(sc, whichfork, offset, __return_address);
}

/* Signal an incomplete scrub. */
void
xfs_scrub_set_incomplete(
	struct xfs_scrub_context	*sc)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_INCOMPLETE;
	trace_xfs_scrub_incomplete(sc, __return_address);
}

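/*
 * Usage sketch: checks record their verdict with these setters instead
 * of returning an error, so one bad field doesn't hide the rest.  The
 * bounds check below is a made-up example in the style of the AGF
 * scrubber:
 *
 *	if (be32_to_cpu(agf->agf_length) > mp->m_sb.sb_agblocks)
 *		xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);
 *
 * Userspace later sees the accumulated sm_flags.
 */
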
/*
 * rmap scrubbing -- compute the number of blocks with a given owner,
 * at least according to the reverse mapping data.
 */

struct xfs_scrub_rmap_ownedby_info {
	struct xfs_owner_info	*oinfo;
	xfs_filblks_t		*blocks;
};

STATIC int
xfs_scrub_count_rmap_ownedby_irec(
	struct xfs_btree_cur		*cur,
	struct xfs_rmap_irec		*rec,
	void				*priv)
{
	struct xfs_scrub_rmap_ownedby_info	*sroi = priv;
	bool					irec_attr;
	bool					oinfo_attr;

	irec_attr = rec->rm_flags & XFS_RMAP_ATTR_FORK;
	oinfo_attr = sroi->oinfo->oi_flags & XFS_OWNER_INFO_ATTR_FORK;

	if (rec->rm_owner != sroi->oinfo->oi_owner)
		return 0;

	if (XFS_RMAP_NON_INODE_OWNER(rec->rm_owner) || irec_attr == oinfo_attr)
		(*sroi->blocks) += rec->rm_blockcount;

	return 0;
}

/*
 * Calculate the number of blocks the rmap thinks are owned by something.
 * The caller should pass us an rmapbt cursor.
 */
int
xfs_scrub_count_rmap_ownedby_ag(
	struct xfs_scrub_context	*sc,
	struct xfs_btree_cur		*cur,
	struct xfs_owner_info		*oinfo,
	xfs_filblks_t			*blocks)
{
	struct xfs_scrub_rmap_ownedby_info	sroi;

	sroi.oinfo = oinfo;
	*blocks = 0;
	sroi.blocks = blocks;

	return xfs_rmap_query_all(cur, xfs_scrub_count_rmap_ownedby_irec,
			&sroi);
}

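/*
 * Usage sketch: a cross-referencing check might compare the rmapbt's
 * idea of how many blocks an owner has against a count it made some
 * other way.  "expected" is a stand-in for whatever the caller
 * computed:
 *
 *	struct xfs_owner_info	oinfo;
 *	xfs_filblks_t		blocks;
 *
 *	xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_AG);
 *	error = xfs_scrub_count_rmap_ownedby_ag(sc, sc->sa.rmap_cur,
 *			&oinfo, &blocks);
 *	if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.rmap_cur))
 *		return;
 *	if (blocks != expected)
 *		xfs_scrub_block_xref_set_corrupt(sc, sc->sa.agf_bp);
 */
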
/*
 * AG scrubbing
 *
 * These helpers facilitate locking an allocation group's header
 * buffers, setting up cursors for all btrees that are present, and
 * cleaning everything up once we're through.
 */

/* Decide if we want to return an AG header read failure. */
static inline bool
want_ag_read_header_failure(
	struct xfs_scrub_context	*sc,
	unsigned int			type)
{
	/* Return all AG header read failures when scanning btrees. */
	if (sc->sm->sm_type != XFS_SCRUB_TYPE_AGF &&
	    sc->sm->sm_type != XFS_SCRUB_TYPE_AGFL &&
	    sc->sm->sm_type != XFS_SCRUB_TYPE_AGI)
		return true;
	/*
	 * If we're scanning a given type of AG header, we only want to
	 * see read failures from that specific header.  We'd like the
	 * other headers to cross-check them, but this isn't required.
	 */
	if (sc->sm->sm_type == type)
		return true;
	return false;
}

/*
 * Grab all the headers for an AG.
 *
 * The headers should be released by xfs_scrub_ag_free, but as a fail
 * safe we attach all the buffers we grab to the scrub transaction so
 * they'll all be freed when we cancel it.
 */
int
xfs_scrub_ag_read_headers(
	struct xfs_scrub_context	*sc,
	xfs_agnumber_t			agno,
	struct xfs_buf			**agi,
	struct xfs_buf			**agf,
	struct xfs_buf			**agfl)
{
	struct xfs_mount		*mp = sc->mp;
	int				error;

	error = xfs_ialloc_read_agi(mp, sc->tp, agno, agi);
	if (error && want_ag_read_header_failure(sc, XFS_SCRUB_TYPE_AGI))
		goto out;

	error = xfs_alloc_read_agf(mp, sc->tp, agno, 0, agf);
	if (error && want_ag_read_header_failure(sc, XFS_SCRUB_TYPE_AGF))
		goto out;

	error = xfs_alloc_read_agfl(mp, sc->tp, agno, agfl);
	if (error && want_ag_read_header_failure(sc, XFS_SCRUB_TYPE_AGFL))
		goto out;
	error = 0;
out:
	return error;
}

/* Release all the AG btree cursors. */
void
xfs_scrub_ag_btcur_free(
	struct xfs_scrub_ag		*sa)
{
	if (sa->refc_cur)
		xfs_btree_del_cursor(sa->refc_cur, XFS_BTREE_ERROR);
	if (sa->rmap_cur)
		xfs_btree_del_cursor(sa->rmap_cur, XFS_BTREE_ERROR);
	if (sa->fino_cur)
		xfs_btree_del_cursor(sa->fino_cur, XFS_BTREE_ERROR);
	if (sa->ino_cur)
		xfs_btree_del_cursor(sa->ino_cur, XFS_BTREE_ERROR);
	if (sa->cnt_cur)
		xfs_btree_del_cursor(sa->cnt_cur, XFS_BTREE_ERROR);
	if (sa->bno_cur)
		xfs_btree_del_cursor(sa->bno_cur, XFS_BTREE_ERROR);

	sa->refc_cur = NULL;
	sa->rmap_cur = NULL;
	sa->fino_cur = NULL;
	sa->ino_cur = NULL;
	sa->bno_cur = NULL;
	sa->cnt_cur = NULL;
}

/* Initialize all the btree cursors for an AG. */
int
xfs_scrub_ag_btcur_init(
	struct xfs_scrub_context	*sc,
	struct xfs_scrub_ag		*sa)
{
	struct xfs_mount		*mp = sc->mp;
	xfs_agnumber_t			agno = sa->agno;

	if (sa->agf_bp) {
		/* Set up a bnobt cursor for cross-referencing. */
		sa->bno_cur = xfs_allocbt_init_cursor(mp, sc->tp, sa->agf_bp,
				agno, XFS_BTNUM_BNO);
		if (!sa->bno_cur)
			goto err;

		/* Set up a cntbt cursor for cross-referencing. */
		sa->cnt_cur = xfs_allocbt_init_cursor(mp, sc->tp, sa->agf_bp,
				agno, XFS_BTNUM_CNT);
		if (!sa->cnt_cur)
			goto err;
	}

	/* Set up an inobt cursor for cross-referencing. */
	if (sa->agi_bp) {
		sa->ino_cur = xfs_inobt_init_cursor(mp, sc->tp, sa->agi_bp,
				agno, XFS_BTNUM_INO);
		if (!sa->ino_cur)
			goto err;
	}

	/* Set up a finobt cursor for cross-referencing. */
	if (sa->agi_bp && xfs_sb_version_hasfinobt(&mp->m_sb)) {
		sa->fino_cur = xfs_inobt_init_cursor(mp, sc->tp, sa->agi_bp,
				agno, XFS_BTNUM_FINO);
		if (!sa->fino_cur)
			goto err;
	}

	/* Set up an rmapbt cursor for cross-referencing. */
	if (sa->agf_bp && xfs_sb_version_hasrmapbt(&mp->m_sb)) {
		sa->rmap_cur = xfs_rmapbt_init_cursor(mp, sc->tp, sa->agf_bp,
				agno);
		if (!sa->rmap_cur)
			goto err;
	}

	/* Set up a refcountbt cursor for cross-referencing. */
	if (sa->agf_bp && xfs_sb_version_hasreflink(&mp->m_sb)) {
		sa->refc_cur = xfs_refcountbt_init_cursor(mp, sc->tp,
				sa->agf_bp, agno, NULL);
		if (!sa->refc_cur)
			goto err;
	}

	return 0;
err:
	return -ENOMEM;
}

/* Release the AG header context and btree cursors. */
void
xfs_scrub_ag_free(
	struct xfs_scrub_context	*sc,
	struct xfs_scrub_ag		*sa)
{
	xfs_scrub_ag_btcur_free(sa);
	if (sa->agfl_bp) {
		xfs_trans_brelse(sc->tp, sa->agfl_bp);
		sa->agfl_bp = NULL;
	}
	if (sa->agf_bp) {
		xfs_trans_brelse(sc->tp, sa->agf_bp);
		sa->agf_bp = NULL;
	}
	if (sa->agi_bp) {
		xfs_trans_brelse(sc->tp, sa->agi_bp);
		sa->agi_bp = NULL;
	}
	sa->agno = NULLAGNUMBER;
}

/*
 * For scrub, grab the AGI and the AGF headers, in that order.  Locking
 * order requires us to get the AGI before the AGF.  We use the
 * transaction to avoid deadlocking on crosslinked metadata buffers;
 * either the caller passes one in (bmap scrub) or we have to create a
 * transaction ourselves.
 */
int
xfs_scrub_ag_init(
	struct xfs_scrub_context	*sc,
	xfs_agnumber_t			agno,
	struct xfs_scrub_ag		*sa)
{
	int				error;

	sa->agno = agno;
	error = xfs_scrub_ag_read_headers(sc, agno, &sa->agi_bp,
			&sa->agf_bp, &sa->agfl_bp);
	if (error)
		return error;

	return xfs_scrub_ag_btcur_init(sc, sa);
}

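/*
 * Lifecycle sketch for an AG scrubber (schematic only; real scrubbers
 * get their setup done via xfs_scrub_setup_ag_btree below and their
 * teardown from the scrub framework):
 *
 *	error = xfs_scrub_ag_init(sc, agno, &sc->sa);
 *	if (!xfs_scrub_process_error(sc, agno, 0, &error))
 *		return error;
 *	... walk sc->sa.bno_cur, sc->sa.rmap_cur, etc. ...
 *	xfs_scrub_ag_free(sc, &sc->sa);
 */
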
/* Per-scrubber setup functions */

/*
 * Grab an empty transaction so that we can re-grab locked buffers if
 * one of our btrees turns out to be cyclic.
 */
int
xfs_scrub_trans_alloc(
	struct xfs_scrub_context	*sc)
{
	return xfs_trans_alloc_empty(sc->mp, &sc->tp);
}

/* Set us up with a transaction and an empty context. */
int
xfs_scrub_setup_fs(
	struct xfs_scrub_context	*sc,
	struct xfs_inode		*ip)
{
	return xfs_scrub_trans_alloc(sc);
}

/* Set us up with AG headers and btree cursors. */
int
xfs_scrub_setup_ag_btree(
	struct xfs_scrub_context	*sc,
	struct xfs_inode		*ip,
	bool				force_log)
{
	struct xfs_mount		*mp = sc->mp;
	int				error;

	/*
	 * If the caller asks us to checkpoint the log, do so.  This
	 * expensive operation should be performed infrequently and only
	 * as a last resort.  Any caller that sets force_log should
	 * document why they need to do so.
	 */
	if (force_log) {
		error = xfs_scrub_checkpoint_log(mp);
		if (error)
			return error;
	}

	error = xfs_scrub_setup_fs(sc, ip);
	if (error)
		return error;

	return xfs_scrub_ag_init(sc, sc->sm->sm_agno, &sc->sa);
}

/* Push everything out of the log onto disk. */
int
xfs_scrub_checkpoint_log(
	struct xfs_mount	*mp)
{
	int			error;

	error = xfs_log_force(mp, XFS_LOG_SYNC);
	if (error)
		return error;
	xfs_ail_push_all_sync(mp->m_ail);
	return 0;
}

/*
 * Given an inode and the scrub control structure, grab either the
 * inode referenced in the control structure or the inode passed in.
 * The inode is not locked.
 */
int
xfs_scrub_get_inode(
	struct xfs_scrub_context	*sc,
	struct xfs_inode		*ip_in)
{
	struct xfs_imap			imap;
	struct xfs_mount		*mp = sc->mp;
	struct xfs_inode		*ip = NULL;
	int				error;

	/* We want to scan the inode we already had opened. */
	if (sc->sm->sm_ino == 0 || sc->sm->sm_ino == ip_in->i_ino) {
		sc->ip = ip_in;
		return 0;
	}

	/* Look up the inode, see if the generation number matches. */
	if (xfs_internal_inum(mp, sc->sm->sm_ino))
		return -ENOENT;
	error = xfs_iget(mp, NULL, sc->sm->sm_ino,
			XFS_IGET_UNTRUSTED | XFS_IGET_DONTCACHE, 0, &ip);
	switch (error) {
	case -ENOENT:
		/* Inode doesn't exist, just bail out. */
		return error;
	case 0:
		/* Got an inode, continue. */
		break;
	case -EINVAL:
		/*
		 * -EINVAL with IGET_UNTRUSTED could mean one of several
		 * things: userspace gave us an inode number that doesn't
		 * correspond to fs space, or doesn't have an inobt entry;
		 * or it could simply mean that the inode buffer failed the
		 * read verifiers.
		 *
		 * Try just the inode mapping lookup -- if it succeeds, then
		 * the inode buffer verifier failed and something needs fixing.
		 * Otherwise, we really couldn't find it so tell userspace
		 * that it no longer exists.
		 */
		error = xfs_imap(sc->mp, sc->tp, sc->sm->sm_ino, &imap,
				XFS_IGET_UNTRUSTED | XFS_IGET_DONTCACHE);
		if (error)
			return -ENOENT;
		error = -EFSCORRUPTED;
		/* fall through */
	default:
		trace_xfs_scrub_op_error(sc,
				XFS_INO_TO_AGNO(mp, sc->sm->sm_ino),
				XFS_INO_TO_AGBNO(mp, sc->sm->sm_ino),
				error, __return_address);
		return error;
	}
	if (VFS_I(ip)->i_generation != sc->sm->sm_gen) {
		iput(VFS_I(ip));
		return -ENOENT;
	}

	sc->ip = ip;
	return 0;
}

/* Set us up to scrub a file's contents. */
int
xfs_scrub_setup_inode_contents(
	struct xfs_scrub_context	*sc,
	struct xfs_inode		*ip,
	unsigned int			resblks)
{
	int				error;

	error = xfs_scrub_get_inode(sc, ip);
	if (error)
		return error;

	/* Got the inode, lock it and we're ready to go. */
	sc->ilock_flags = XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL;
	xfs_ilock(sc->ip, sc->ilock_flags);
	error = xfs_scrub_trans_alloc(sc);
	if (error)
		goto out;
	sc->ilock_flags |= XFS_ILOCK_EXCL;
	xfs_ilock(sc->ip, XFS_ILOCK_EXCL);

out:
	/* scrub teardown will unlock and release the inode for us */
	return error;
}

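/*
 * Sketch of a per-type setup function built on the helper above
 * (hypothetical scrubber; the real callers live in the per-type
 * scrub files):
 *
 *	int
 *	xfs_scrub_setup_example(
 *		struct xfs_scrub_context	*sc,
 *		struct xfs_inode		*ip)
 *	{
 *		return xfs_scrub_setup_inode_contents(sc, ip, 0);
 *	}
 */
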
/*
 * Predicate that decides if we need to evaluate the cross-reference check.
 * If there was an error accessing the cross-reference btree, just delete
 * the cursor and skip the check.
 */
bool
xfs_scrub_should_check_xref(
	struct xfs_scrub_context	*sc,
	int				*error,
	struct xfs_btree_cur		**curpp)
{
	if (*error == 0)
		return true;

	if (curpp) {
		/* If we've already given up on xref, just bail out. */
		if (!*curpp)
			return false;

		/* xref error, delete cursor and bail out. */
		xfs_btree_del_cursor(*curpp, XFS_BTREE_ERROR);
		*curpp = NULL;
	}

	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_XFAIL;
	trace_xfs_scrub_xref_error(sc, *error, __return_address);

	/*
	 * Errors encountered during cross-referencing with another
	 * data structure should not cause this scrubber to abort.
	 */
	*error = 0;
	return false;
}

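/*
 * Usage sketch: each xref query gets wrapped in this predicate so that
 * a broken secondary btree merely downgrades the check (XFAIL) instead
 * of aborting the whole scrub.  Schematic example in the style of the
 * free-space cross-checks:
 *
 *	error = xfs_alloc_has_record(sc->sa.bno_cur, agbno, len,
 *			&is_freesp);
 *	if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.bno_cur))
 *		return;
 *	if (is_freesp)
 *		xfs_scrub_block_xref_set_corrupt(sc, sc->sa.agf_bp);
 */
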
/* Run the structure verifiers on in-memory buffers to detect bad memory. */
void
xfs_scrub_buffer_recheck(
	struct xfs_scrub_context	*sc,
	struct xfs_buf			*bp)
{
	xfs_failaddr_t			fa;

	if (bp->b_ops == NULL) {
		xfs_scrub_block_set_corrupt(sc, bp);
		return;
	}
	if (bp->b_ops->verify_struct == NULL) {
		xfs_scrub_set_incomplete(sc);
		return;
	}
	fa = bp->b_ops->verify_struct(bp);
	if (!fa)
		return;
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT;
	trace_xfs_scrub_block_error(sc, bp->b_bn, fa);
}