// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2017 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_btree.h"
#include "xfs_bit.h"
#include "xfs_log_format.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_inode.h"
#include "xfs_alloc.h"
#include "xfs_ialloc.h"
#include "xfs_ialloc_btree.h"
#include "xfs_icache.h"
#include "xfs_rmap.h"
#include "xfs_log.h"
#include "xfs_trans_priv.h"
#include "scrub/xfs_scrub.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/btree.h"
#include "scrub/trace.h"

/*
 * Set us up to scrub inode btrees.
 * If we detect a discrepancy between the inobt and the inode,
 * try again after forcing logged inode cores out to disk.
 */
int
xchk_setup_ag_iallocbt(
	struct xfs_scrub	*sc,
	struct xfs_inode	*ip)
{
	return xchk_setup_ag_btree(sc, ip, sc->try_harder);
}

/* Inode btree scrubber. */

struct xchk_iallocbt {
	/* Number of inodes we see while scanning inobt. */
	unsigned long long	inodes;

	/* Expected next startino, for big block filesystems. */
	xfs_agino_t		next_startino;

	/* Expected end of the current inode cluster. */
	xfs_agino_t		next_cluster_ino;
};

/*
 * If we're checking the finobt, cross-reference with the inobt.
 * Otherwise we're checking the inobt; if there is a finobt, make sure that
 * it has a record for this chunk if and only if the chunk has free inodes.
 */
static inline void
xchk_iallocbt_chunk_xref_other(
	struct xfs_scrub		*sc,
	struct xfs_inobt_rec_incore	*irec,
	xfs_agino_t			agino)
{
	struct xfs_btree_cur		**pcur;
	bool				has_irec;
	int				error;

	if (sc->sm->sm_type == XFS_SCRUB_TYPE_FINOBT)
		pcur = &sc->sa.ino_cur;
	else
		pcur = &sc->sa.fino_cur;
	if (!(*pcur))
		return;
	error = xfs_ialloc_has_inode_record(*pcur, agino, agino, &has_irec);
	if (!xchk_should_check_xref(sc, &error, pcur))
		return;
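	/*
	 * Flag corruption when the presence of a record in the other btree
	 * disagrees with what this record's freecount says it should be.
	 */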
	if ((irec->ir_freecount > 0 && !has_irec) ||
	    (irec->ir_freecount == 0 && has_irec))
		xchk_btree_xref_set_corrupt(sc, *pcur, 0);
}

/* Cross-reference with the other btrees. */
STATIC void
xchk_iallocbt_chunk_xref(
	struct xfs_scrub		*sc,
	struct xfs_inobt_rec_incore	*irec,
	xfs_agino_t			agino,
	xfs_agblock_t			agbno,
	xfs_extlen_t			len)
{
	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

	xchk_xref_is_used_space(sc, agbno, len);
	xchk_iallocbt_chunk_xref_other(sc, irec, agino);
	xchk_xref_is_owned_by(sc, agbno, len, &XFS_RMAP_OINFO_INODES);
	xchk_xref_is_not_shared(sc, agbno, len);
}

/* Is this chunk worth checking? */
STATIC bool
xchk_iallocbt_chunk(
	struct xchk_btree		*bs,
	struct xfs_inobt_rec_incore	*irec,
	xfs_agino_t			agino,
	xfs_extlen_t			len)
{
	struct xfs_mount		*mp = bs->cur->bc_mp;
	xfs_agnumber_t			agno = bs->cur->bc_private.a.agno;
	xfs_agblock_t			bno;

	bno = XFS_AGINO_TO_AGBNO(mp, agino);
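	/* The chunk must sit entirely within the AG and must not wrap. */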
	if (bno + len <= bno ||
	    !xfs_verify_agbno(mp, agno, bno) ||
	    !xfs_verify_agbno(mp, agno, bno + len - 1))
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);

	xchk_iallocbt_chunk_xref(bs->sc, irec, agino, bno, len);

	return true;
}

/* Count the number of free inodes. */
static unsigned int
xchk_iallocbt_freecount(
	xfs_inofree_t			freemask)
{
	BUILD_BUG_ON(sizeof(freemask) != sizeof(__u64));
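	/* Each set bit in the free mask is one free inode. */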
	return hweight64(freemask);
}

/*
 * Check that an inode's allocation status matches ir_free in the inobt
 * record.  First we try querying the in-core inode state, and if the inode
 * isn't loaded we examine the on-disk inode directly.
 *
 * Since there can be 1:M and M:1 mappings between inobt records and inode
 * clusters, we pass in the inode location information as an inobt record;
 * the index of an inode cluster within the inobt record (as well as the
 * cluster buffer itself); and the index of the inode within the cluster.
 *
 * @irec is the inobt record.
 * @irec_ino is the inode offset from the start of the record.
 * @dip is the on-disk inode.
 */
STATIC int
xchk_iallocbt_check_cluster_ifree(
	struct xchk_btree		*bs,
	struct xfs_inobt_rec_incore	*irec,
	unsigned int			irec_ino,
	struct xfs_dinode		*dip)
{
	struct xfs_mount		*mp = bs->cur->bc_mp;
	xfs_ino_t			fsino;
	xfs_agino_t			agino;
	bool				irec_free;
	bool				ino_inuse;
	bool				freemask_ok;
	int				error = 0;

	if (xchk_should_terminate(bs->sc, &error))
		return error;

	/*
	 * Given an inobt record and the offset of an inode from the start of
	 * the record, compute which fs inode we're talking about.
	 */
	agino = irec->ir_startino + irec_ino;
	fsino = XFS_AGINO_TO_INO(mp, bs->cur->bc_private.a.agno, agino);
	irec_free = (irec->ir_free & XFS_INOBT_MASK(irec_ino));

	if (be16_to_cpu(dip->di_magic) != XFS_DINODE_MAGIC ||
	    (dip->di_version >= 3 && be64_to_cpu(dip->di_ino) != fsino)) {
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
		goto out;
	}

	error = xfs_icache_inode_is_allocated(mp, bs->cur->bc_tp, fsino,
			&ino_inuse);
	if (error == -ENODATA) {
		/* Not cached, just read the disk buffer */
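		/*
		 * A free on-disk inode has di_mode == 0, so the XOR below is
		 * true exactly when the inobt free bit and the on-disk
		 * allocation state agree.
		 */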
		freemask_ok = irec_free ^ !!(dip->di_mode);
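		/*
		 * If they disagree, the inode core may merely be logged and
		 * not yet written back; return -EDEADLOCK so that the caller
		 * retries with try_harder set, which forces logged inode
		 * cores out to disk first (see the comment at the top of
		 * this file).
		 */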
		if (!bs->sc->try_harder && !freemask_ok)
			return -EDEADLOCK;
	} else if (error < 0) {
		/*
		 * Inode is only half assembled, or there was an IO error,
		 * or the verifier failed, so don't bother trying to check.
		 * The inode scrubber can deal with this.
		 */
		goto out;
	} else {
		/* Inode is all there. */
		freemask_ok = irec_free ^ ino_inuse;
	}
	if (!freemask_ok)
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
out:
	return 0;
}

/*
 * Check that the holemask and freemask of a hypothetical inode cluster match
 * what's actually on disk.  If sparse inodes are enabled, the cluster does
 * not actually have to map to inodes if the corresponding holemask bit is set.
 *
 * @cluster_base is the first inode in the cluster within the @irec.
 */
STATIC int
xchk_iallocbt_check_cluster(
	struct xchk_btree		*bs,
	struct xfs_inobt_rec_incore	*irec,
	unsigned int			cluster_base)
{
	struct xfs_imap			imap;
	struct xfs_mount		*mp = bs->cur->bc_mp;
	struct xfs_dinode		*dip;
	struct xfs_buf			*cluster_bp;
	unsigned int			nr_inodes;
	xfs_agnumber_t			agno = bs->cur->bc_private.a.agno;
	xfs_agblock_t			agbno;
	unsigned int			cluster_index;
	uint16_t			cluster_mask = 0;
	uint16_t			ir_holemask;
	int				error = 0;

	nr_inodes = min_t(unsigned int, XFS_INODES_PER_CHUNK,
			mp->m_inodes_per_cluster);

	/* Map this inode cluster */
	agbno = XFS_AGINO_TO_AGBNO(mp, irec->ir_startino + cluster_base);

	/* Compute a bitmask for this cluster that can be used for holemask. */
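	/*
	 * Each holemask bit covers XFS_INODES_PER_HOLEMASK_BIT (i.e. four)
	 * inodes, so, for example, a 32-inode cluster at cluster_base 32
	 * corresponds to holemask bits 8-15, i.e. a cluster_mask of 0xff00.
	 */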
	for (cluster_index = 0;
	     cluster_index < nr_inodes;
	     cluster_index += XFS_INODES_PER_HOLEMASK_BIT)
		cluster_mask |= XFS_INOBT_MASK((cluster_base + cluster_index) /
				XFS_INODES_PER_HOLEMASK_BIT);

	/*
	 * Map the first inode of this cluster to a buffer and offset.
	 * Be careful about inobt records that don't align with the start of
	 * the inode buffer when block sizes are large enough to hold multiple
	 * inode chunks.  When this happens, cluster_base will be zero but
	 * ir_startino can be large enough to make im_boffset nonzero.
	 */
	ir_holemask = (irec->ir_holemask & cluster_mask);
	imap.im_blkno = XFS_AGB_TO_DADDR(mp, agno, agbno);
	imap.im_len = XFS_FSB_TO_BB(mp, mp->m_blocks_per_cluster);
	imap.im_boffset = XFS_INO_TO_OFFSET(mp, irec->ir_startino) <<
			mp->m_sb.sb_inodelog;

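	/*
	 * A nonzero im_boffset means this chunk starts partway into the
	 * cluster buffer, which only happens when one buffer covers several
	 * inode chunks; in that case each inobt record maps at most one
	 * cluster and cluster_base must be zero.  Both being nonzero means
	 * the geometry is inconsistent.
	 */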
	if (imap.im_boffset != 0 && cluster_base != 0) {
		ASSERT(imap.im_boffset == 0 || cluster_base == 0);
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
		return 0;
	}

	trace_xchk_iallocbt_check_cluster(mp, agno, irec->ir_startino,
			imap.im_blkno, imap.im_len, cluster_base, nr_inodes,
			cluster_mask, ir_holemask,
			XFS_INO_TO_OFFSET(mp, irec->ir_startino +
					  cluster_base));

	/* The whole cluster must be a hole or not a hole. */
	if (ir_holemask != cluster_mask && ir_holemask != 0) {
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
		return 0;
	}

	/* If any part of this is a hole, skip it. */
	if (ir_holemask) {
		xchk_xref_is_not_owned_by(bs->sc, agbno,
				mp->m_blocks_per_cluster,
				&XFS_RMAP_OINFO_INODES);
		return 0;
	}

	xchk_xref_is_owned_by(bs->sc, agbno, mp->m_blocks_per_cluster,
			&XFS_RMAP_OINFO_INODES);

	/* Grab the inode cluster buffer. */
	error = xfs_imap_to_bp(mp, bs->cur->bc_tp, &imap, &dip, &cluster_bp,
			0, 0);
	if (!xchk_btree_xref_process_error(bs->sc, bs->cur, 0, &error))
		return error;

	/* Check free status of each inode within this cluster. */
	for (cluster_index = 0; cluster_index < nr_inodes; cluster_index++) {
		struct xfs_dinode	*dip;

		if (imap.im_boffset >= BBTOB(cluster_bp->b_length)) {
			xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
			break;
		}

		dip = xfs_buf_offset(cluster_bp, imap.im_boffset);
		error = xchk_iallocbt_check_cluster_ifree(bs, irec,
				cluster_base + cluster_index, dip);
		if (error)
			break;
		imap.im_boffset += mp->m_sb.sb_inodesize;
	}

	xfs_trans_brelse(bs->cur->bc_tp, cluster_bp);
	return error;
}

/*
 * For all the inode clusters that could map to this inobt record, make sure
 * that the holemask makes sense and that the allocation status of each inode
 * matches the freemask.
 */
STATIC int
xchk_iallocbt_check_clusters(
	struct xchk_btree		*bs,
	struct xfs_inobt_rec_incore	*irec)
{
	unsigned int			cluster_base;
	int				error = 0;

	/*
	 * For the common case where this inobt record maps to multiple inode
	 * clusters this will call _check_cluster for each cluster.
	 *
	 * For the case that multiple inobt records map to a single cluster,
	 * this will call _check_cluster once.
	 */
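	/*
	 * For example, with 16 inodes per cluster, a 64-inode chunk is
	 * checked as four clusters (cluster_base 0, 16, 32, and 48); with
	 * 128 inodes per cluster, the loop body runs only once.
	 */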
	for (cluster_base = 0;
	     cluster_base < XFS_INODES_PER_CHUNK;
	     cluster_base += bs->sc->mp->m_inodes_per_cluster) {
		error = xchk_iallocbt_check_cluster(bs, irec, cluster_base);
		if (error)
			break;
	}

	return error;
}

/*
 * Make sure this inode btree record is aligned properly.  Because a fs block
 * contains multiple inodes, we check that the inobt record is aligned to the
 * correct inode, not just the correct block on disk.  This results in a finer
 * grained corruption check.
 */
STATIC void
xchk_iallocbt_rec_alignment(
	struct xchk_btree		*bs,
	struct xfs_inobt_rec_incore	*irec)
{
	struct xfs_mount		*mp = bs->sc->mp;
	struct xchk_iallocbt		*iabt = bs->private;

	/*
	 * finobt records have different positioning requirements than inobt
	 * records: each finobt record must have a corresponding inobt record.
	 * That is checked in the xref function, so for now we only catch the
	 * obvious case where the record isn't at all aligned properly.
	 *
	 * Note that if a fs block contains more than a single chunk of inodes,
	 * we will have finobt records only for those chunks containing free
	 * inodes, and therefore expect chunk alignment of finobt records.
	 * Otherwise, we expect that the finobt record is aligned to the
	 * cluster alignment as told by the superblock.
	 */
	if (bs->cur->bc_btnum == XFS_BTNUM_FINO) {
		unsigned int	imask;

		imask = min_t(unsigned int, XFS_INODES_PER_CHUNK,
				mp->m_cluster_align_inodes) - 1;
		if (irec->ir_startino & imask)
			xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
		return;
	}

	if (iabt->next_startino != NULLAGINO) {
		/*
		 * We're midway through a cluster of inodes that is mapped by
		 * multiple inobt records.  Did we get the record for the next
		 * irec in the sequence?
		 */
		if (irec->ir_startino != iabt->next_startino) {
			xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
			return;
		}

		iabt->next_startino += XFS_INODES_PER_CHUNK;

		/* Are we done with the cluster? */
		if (iabt->next_startino >= iabt->next_cluster_ino) {
			iabt->next_startino = NULLAGINO;
			iabt->next_cluster_ino = NULLAGINO;
		}
		return;
	}

	/* inobt records must be aligned to cluster and inoalignment size. */
	if (irec->ir_startino & (mp->m_cluster_align_inodes - 1)) {
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
		return;
	}

	if (irec->ir_startino & (mp->m_inodes_per_cluster - 1)) {
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
		return;
	}

	if (mp->m_inodes_per_cluster <= XFS_INODES_PER_CHUNK)
		return;

	/*
	 * If this is the start of an inode cluster that can be mapped by
	 * multiple inobt records, the next inobt record must follow exactly
	 * after this one.
	 */
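	/*
	 * For example, on a 64k-block filesystem with 512-byte inodes, one
	 * cluster holds 128 inodes, so the record at ir_startino must be
	 * followed immediately by another record at ir_startino + 64.
	 */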
	iabt->next_startino = irec->ir_startino + XFS_INODES_PER_CHUNK;
	iabt->next_cluster_ino = irec->ir_startino + mp->m_inodes_per_cluster;
}

/* Scrub an inobt/finobt record. */
STATIC int
xchk_iallocbt_rec(
	struct xchk_btree		*bs,
	union xfs_btree_rec		*rec)
{
	struct xfs_mount		*mp = bs->cur->bc_mp;
	struct xchk_iallocbt		*iabt = bs->private;
	struct xfs_inobt_rec_incore	irec;
	uint64_t			holes;
	xfs_agnumber_t			agno = bs->cur->bc_private.a.agno;
	xfs_agino_t			agino;
	xfs_extlen_t			len;
	int				holecount;
	int				i;
	int				error = 0;
	unsigned int			real_freecount;
	uint16_t			holemask;

	xfs_inobt_btrec_to_irec(mp, rec, &irec);

	if (irec.ir_count > XFS_INODES_PER_CHUNK ||
	    irec.ir_freecount > XFS_INODES_PER_CHUNK)
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);

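	/*
	 * A sparse chunk tracks fewer than XFS_INODES_PER_CHUNK inodes; the
	 * untracked slots are marked free in ir_free, so count them as free
	 * before comparing against the set bits in ir_free.
	 */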
	real_freecount = irec.ir_freecount +
			(XFS_INODES_PER_CHUNK - irec.ir_count);
	if (real_freecount != xchk_iallocbt_freecount(irec.ir_free))
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);

	agino = irec.ir_startino;
	/* Record has to be properly aligned within the AG. */
	if (!xfs_verify_agino(mp, agno, agino) ||
	    !xfs_verify_agino(mp, agno, agino + XFS_INODES_PER_CHUNK - 1)) {
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
		goto out;
	}

	xchk_iallocbt_rec_alignment(bs, &irec);
	if (bs->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		goto out;

	iabt->inodes += irec.ir_count;

	/* Handle non-sparse inodes */
	if (!xfs_inobt_issparse(irec.ir_holemask)) {
		len = XFS_B_TO_FSB(mp,
				XFS_INODES_PER_CHUNK * mp->m_sb.sb_inodesize);
		if (irec.ir_count != XFS_INODES_PER_CHUNK)
			xchk_btree_set_corrupt(bs->sc, bs->cur, 0);

		if (!xchk_iallocbt_chunk(bs, &irec, agino, len))
			goto out;
		goto check_clusters;
	}

	/* Check each chunk of a sparse inode cluster. */
	holemask = irec.ir_holemask;
	holecount = 0;
	len = XFS_B_TO_FSB(mp,
			XFS_INODES_PER_HOLEMASK_BIT * mp->m_sb.sb_inodesize);
	holes = ~xfs_inobt_irec_to_allocmask(&irec);
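	/* Each hole in the chunk must also be marked free in ir_free. */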
	if ((holes & irec.ir_free) != holes ||
	    irec.ir_freecount > irec.ir_count)
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);

	for (i = 0; i < XFS_INOBT_HOLEMASK_BITS; i++) {
		if (holemask & 1)
			holecount += XFS_INODES_PER_HOLEMASK_BIT;
		else if (!xchk_iallocbt_chunk(bs, &irec, agino, len))
			break;
		holemask >>= 1;
		agino += XFS_INODES_PER_HOLEMASK_BIT;
	}

	if (holecount > XFS_INODES_PER_CHUNK ||
	    holecount + irec.ir_count != XFS_INODES_PER_CHUNK)
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);

check_clusters:
	error = xchk_iallocbt_check_clusters(bs, &irec);
	if (error)
		goto out;

out:
	return error;
}

/*
 * Make sure the inode btrees are as large as the rmap thinks they are.
 * Don't bother if we're missing btree cursors, as we're already corrupt.
 */
STATIC void
xchk_iallocbt_xref_rmap_btreeblks(
	struct xfs_scrub	*sc,
	int			which)
{
	xfs_filblks_t		blocks;
	xfs_extlen_t		inobt_blocks = 0;
	xfs_extlen_t		finobt_blocks = 0;
	int			error;

	if (!sc->sa.ino_cur || !sc->sa.rmap_cur ||
	    (xfs_sb_version_hasfinobt(&sc->mp->m_sb) && !sc->sa.fino_cur) ||
	    xchk_skip_xref(sc->sm))
		return;

	/* Check that we saw as many inobt blocks as the rmap says. */
	error = xfs_btree_count_blocks(sc->sa.ino_cur, &inobt_blocks);
	if (!xchk_process_error(sc, 0, 0, &error))
		return;

	if (sc->sa.fino_cur) {
		error = xfs_btree_count_blocks(sc->sa.fino_cur, &finobt_blocks);
		if (!xchk_process_error(sc, 0, 0, &error))
			return;
	}

	error = xchk_count_rmap_ownedby_ag(sc, sc->sa.rmap_cur,
			&XFS_RMAP_OINFO_INOBT, &blocks);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
		return;
	if (blocks != inobt_blocks + finobt_blocks)
		xchk_btree_set_corrupt(sc, sc->sa.ino_cur, 0);
}

/*
 * Make sure that the inobt records point to the same number of blocks as
 * the rmap says are owned by inodes.
 */
STATIC void
xchk_iallocbt_xref_rmap_inodes(
	struct xfs_scrub	*sc,
	int			which,
	unsigned long long	inodes)
{
	xfs_filblks_t		blocks;
	xfs_filblks_t		inode_blocks;
	int			error;

	if (!sc->sa.rmap_cur || xchk_skip_xref(sc->sm))
		return;

	/* Check that we saw as many inode blocks as the rmap knows about. */
	error = xchk_count_rmap_ownedby_ag(sc, sc->sa.rmap_cur,
			&XFS_RMAP_OINFO_INODES, &blocks);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
		return;
	inode_blocks = XFS_B_TO_FSB(sc->mp, inodes * sc->mp->m_sb.sb_inodesize);
	if (blocks != inode_blocks)
		xchk_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);
}

/* Scrub the inode btrees for some AG. */
STATIC int
xchk_iallocbt(
	struct xfs_scrub	*sc,
	xfs_btnum_t		which)
{
	struct xfs_btree_cur	*cur;
	struct xchk_iallocbt	iabt = {
		.inodes		= 0,
		.next_startino	= NULLAGINO,
		.next_cluster_ino = NULLAGINO,
	};
	int			error;

	cur = which == XFS_BTNUM_INO ? sc->sa.ino_cur : sc->sa.fino_cur;
	error = xchk_btree(sc, cur, xchk_iallocbt_rec, &XFS_RMAP_OINFO_INOBT,
			&iabt);
	if (error)
		return error;

	xchk_iallocbt_xref_rmap_btreeblks(sc, which);

	/*
	 * If we're scrubbing the inode btree, inode_blocks is the number of
	 * blocks pointed to by all the inode chunk records.  Therefore, we
	 * should compare to the number of inode chunk blocks that the rmap
	 * knows about.  We can't do this for the finobt since it only points
	 * to inode chunks with free inodes.
	 */
	if (which == XFS_BTNUM_INO)
		xchk_iallocbt_xref_rmap_inodes(sc, which, iabt.inodes);

	return error;
}

int
xchk_inobt(
	struct xfs_scrub	*sc)
{
	return xchk_iallocbt(sc, XFS_BTNUM_INO);
}

int
xchk_finobt(
	struct xfs_scrub	*sc)
{
	return xchk_iallocbt(sc, XFS_BTNUM_FINO);
}

/* See if an inode btree has (or doesn't have) an inode chunk record. */
static inline void
xchk_xref_inode_check(
	struct xfs_scrub	*sc,
	xfs_agblock_t		agbno,
	xfs_extlen_t		len,
	struct xfs_btree_cur	**icur,
	bool			should_have_inodes)
{
	bool			has_inodes;
	int			error;

	if (!(*icur) || xchk_skip_xref(sc->sm))
		return;

	error = xfs_ialloc_has_inodes_at_extent(*icur, agbno, len, &has_inodes);
	if (!xchk_should_check_xref(sc, &error, icur))
		return;
	if (has_inodes != should_have_inodes)
		xchk_btree_xref_set_corrupt(sc, *icur, 0);
}

/* xref check that the extent is not covered by inodes */
void
xchk_xref_is_not_inode_chunk(
	struct xfs_scrub	*sc,
	xfs_agblock_t		agbno,
	xfs_extlen_t		len)
{
	xchk_xref_inode_check(sc, agbno, len, &sc->sa.ino_cur, false);
	xchk_xref_inode_check(sc, agbno, len, &sc->sa.fino_cur, false);
}

/* xref check that the extent is covered by inodes */
void
xchk_xref_is_inode_chunk(
	struct xfs_scrub	*sc,
	xfs_agblock_t		agbno,
	xfs_extlen_t		len)
{
	xchk_xref_inode_check(sc, agbno, len, &sc->sa.ino_cur, true);
}