// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2017 Oracle. All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_btree.h"
#include "xfs_bit.h"
#include "xfs_log_format.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_alloc.h"
#include "xfs_rmap.h"
#include "xfs_refcount.h"
#include "scrub/xfs_scrub.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/btree.h"
#include "scrub/trace.h"

/*
 * Set us up to scrub reference count btrees.
 */
int
xchk_setup_ag_refcountbt(
	struct xfs_scrub *sc,
	struct xfs_inode *ip)
{
	return xchk_setup_ag_btree(sc, ip, false);
}

/* Reference count btree scrubber. */

/*
 * Confirming Reference Counts via Reverse Mappings
 *
 * We want to count the reverse mappings overlapping a refcount record
 * (bno, len, refcount), allowing for the possibility that some of the
 * overlap may come from smaller adjoining reverse mappings, while some
 * comes from single extents which overlap the range entirely. The
 * outer loop is as follows:
 *
 * 1. For all reverse mappings overlapping the refcount extent,
 *    a. If a given rmap completely overlaps, mark it as seen.
 *    b. Otherwise, record the fragment (in agbno order) for later
 *       processing.
 *
 * Once we've seen all the rmaps, we know that for all blocks in the
 * refcount record we want to find $refcount owners and we've already
 * visited $seen extents that overlap all the blocks. Therefore, we
 * need to find ($refcount - $seen) owners for every block in the
 * extent; call that quantity $target_nr. Proceed as follows:
 *
 * 2. Pull the first $target_nr fragments from the list; all of them
 *    should start at or before the start of the extent.
 *    Call this subset of fragments the working set.
 * 3. Until there are no more unprocessed fragments,
 *    a. Find the shortest fragments in the set and remove them.
 *    b. Note the block number of the end of these fragments.
 *    c. Pull the same number of fragments from the list. All of these
 *       fragments should start at the block number recorded in the
 *       previous step.
 *    d. Put those fragments in the set.
 * 4. Check that there are $target_nr fragments remaining in the list,
 *    and that they all end at or beyond the end of the refcount extent.
 *
 * If the refcount is correct, all the check conditions in the algorithm
 * should always hold true. If not, the refcount is incorrect.
 */
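/*
 * An illustrative example (numbers invented for this comment): given a
 * refcount record (bno 10, len 4, refcount 2), an rmap for blocks 10-13
 * overlaps the whole record, so one owner is seen and $target_nr is 1.
 * The second owner of each block must come from fragments, say one for
 * blocks 8-11 and one for blocks 12-14: the first starts at or before
 * block 10 and its end (startblock + blockcount) is block 12; the next
 * fragment picks up exactly at block 12 and ends at block 15, at or
 * beyond the end of the record, so the refcount of 2 checks out.
 */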
struct xchk_refcnt_frag {
	struct list_head list;
	struct xfs_rmap_irec rm;
};

struct xchk_refcnt_check {
	struct xfs_scrub *sc;
	struct list_head fragments;

	/* refcount extent we're examining */
	xfs_agblock_t bno;
	xfs_extlen_t len;
	xfs_nlink_t refcount;

	/* number of owners seen */
	xfs_nlink_t seen;
};

/*
 * Decide if the given rmap is large enough that we can redeem it
 * towards refcount verification now, or if it's a fragment, in
 * which case we'll hang onto it in the hopes that we'll later
 * discover that we've collected exactly the correct number of
 * fragments as the refcountbt says we should have.
 */
STATIC int
xchk_refcountbt_rmap_check(
	struct xfs_btree_cur *cur,
	struct xfs_rmap_irec *rec,
	void *priv)
{
	struct xchk_refcnt_check *refchk = priv;
	struct xchk_refcnt_frag *frag;
	xfs_agblock_t rm_last;
	xfs_agblock_t rc_last;
	int error = 0;

	if (xchk_should_terminate(refchk->sc, &error))
		return error;

	rm_last = rec->rm_startblock + rec->rm_blockcount - 1;
	rc_last = refchk->bno + refchk->len - 1;

	/* Confirm that a single-owner refc extent is a CoW stage. */
	if (refchk->refcount == 1 && rec->rm_owner != XFS_RMAP_OWN_COW) {
		xchk_btree_xref_set_corrupt(refchk->sc, cur, 0);
		return 0;
	}

	if (rec->rm_startblock <= refchk->bno && rm_last >= rc_last) {
		/*
		 * The rmap overlaps the refcount record, so we can confirm
		 * one refcount owner seen.
		 */
		refchk->seen++;
	} else {
		/*
		 * This rmap covers only part of the refcount record, so
		 * save the fragment for later processing. If the rmapbt
		 * is healthy each rmap_irec we see will be in agbno order
		 * so we don't need insertion sort here.
		 */
		frag = kmem_alloc(sizeof(struct xchk_refcnt_frag),
				KM_MAYFAIL);
		if (!frag)
			return -ENOMEM;
		memcpy(&frag->rm, rec, sizeof(frag->rm));
		list_add_tail(&frag->list, &refchk->fragments);
	}

	return 0;
}

/*
 * Given a bunch of rmap fragments, iterate through them, keeping
 * a running tally of the refcount. If this ever deviates from
 * what we expect (which is the refcountbt's refcount minus the
 * number of extents that totally covered the refcountbt extent),
 * we have a refcountbt error.
 */
STATIC void
xchk_refcountbt_process_rmap_fragments(
	struct xchk_refcnt_check *refchk)
{
	struct list_head worklist;
	struct xchk_refcnt_frag *frag;
	struct xchk_refcnt_frag *n;
	xfs_agblock_t bno;
	xfs_agblock_t rbno;
	xfs_agblock_t next_rbno;
	xfs_nlink_t nr;
	xfs_nlink_t target_nr;

	target_nr = refchk->refcount - refchk->seen;
	if (target_nr == 0)
		return;

	/*
	 * There are (refchk->refcount - refchk->seen) references we
	 * haven't found yet. Pull that many off the fragment list and
	 * figure out where the smallest rmap ends (and therefore the
	 * next rmap should start). All the rmaps we pull off should
	 * start at or before the beginning of the refcount record's range.
	 */
	INIT_LIST_HEAD(&worklist);
	rbno = NULLAGBLOCK;
	nr = 1;

	/* Make sure the fragments actually /are/ in agbno order. */
	bno = 0;
	list_for_each_entry(frag, &refchk->fragments, list) {
		if (frag->rm.rm_startblock < bno)
			goto done;
		bno = frag->rm.rm_startblock;
	}

	/*
	 * Find all the rmaps that start at or before the refc extent,
	 * and put them on the worklist.
	 */
	list_for_each_entry_safe(frag, n, &refchk->fragments, list) {
		if (frag->rm.rm_startblock > refchk->bno)
			goto done;
		bno = frag->rm.rm_startblock + frag->rm.rm_blockcount;
		if (bno < rbno)
			rbno = bno;
		list_move_tail(&frag->list, &worklist);
		if (nr == target_nr)
			break;
		nr++;
	}

	/*
	 * We should have found exactly $target_nr rmap fragments starting
	 * at or before the refcount extent.
	 */
	if (nr != target_nr)
		goto done;

	while (!list_empty(&refchk->fragments)) {
		/* Discard any fragments ending at rbno from the worklist. */
		nr = 0;
		next_rbno = NULLAGBLOCK;
		list_for_each_entry_safe(frag, n, &worklist, list) {
			bno = frag->rm.rm_startblock + frag->rm.rm_blockcount;
			if (bno != rbno) {
				if (bno < next_rbno)
					next_rbno = bno;
				continue;
			}
			list_del(&frag->list);
			kmem_free(frag);
			nr++;
		}

		/* Try to add nr rmaps starting at rbno to the worklist. */
		list_for_each_entry_safe(frag, n, &refchk->fragments, list) {
			bno = frag->rm.rm_startblock + frag->rm.rm_blockcount;
			if (frag->rm.rm_startblock != rbno)
				goto done;
			list_move_tail(&frag->list, &worklist);
			if (next_rbno > bno)
				next_rbno = bno;
			nr--;
			if (nr == 0)
				break;
		}

		/*
		 * If we get here and nr > 0, this means that we added fewer
		 * items to the worklist than we discarded because the fragment
		 * list ran out of items. Therefore, we cannot maintain the
		 * required refcount. Something is wrong, so we're done.
		 */
		if (nr)
			goto done;

		rbno = next_rbno;
	}

	/*
	 * Make sure the last extent we processed ends at or beyond
	 * the end of the refcount extent.
	 */
	if (rbno < refchk->bno + refchk->len)
		goto done;

	/* Actually record us having seen the remaining refcount. */
	refchk->seen = refchk->refcount;
done:
	/* Delete fragments and work list. */
	list_for_each_entry_safe(frag, n, &worklist, list) {
		list_del(&frag->list);
		kmem_free(frag);
	}
	list_for_each_entry_safe(frag, n, &refchk->fragments, list) {
		list_del(&frag->list);
		kmem_free(frag);
	}
}

/* Use the rmap entries covering this extent to verify the refcount. */
STATIC void
xchk_refcountbt_xref_rmap(
	struct xfs_scrub *sc,
	xfs_agblock_t bno,
	xfs_extlen_t len,
	xfs_nlink_t refcount)
{
	struct xchk_refcnt_check refchk = {
		.sc = sc,
		.bno = bno,
		.len = len,
		.refcount = refcount,
		.seen = 0,
	};
	struct xfs_rmap_irec low;
	struct xfs_rmap_irec high;
	struct xchk_refcnt_frag *frag;
	struct xchk_refcnt_frag *n;
	int error;

	if (!sc->sa.rmap_cur || xchk_skip_xref(sc->sm))
		return;

	/* Cross-reference with the rmapbt to confirm the refcount. */
	memset(&low, 0, sizeof(low));
	low.rm_startblock = bno;
	memset(&high, 0xFF, sizeof(high));
	high.rm_startblock = bno + len - 1;

	INIT_LIST_HEAD(&refchk.fragments);
	error = xfs_rmap_query_range(sc->sa.rmap_cur, &low, &high,
			&xchk_refcountbt_rmap_check, &refchk);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
		goto out_free;

	xchk_refcountbt_process_rmap_fragments(&refchk);
	if (refcount != refchk.seen)
		xchk_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);

out_free:
	list_for_each_entry_safe(frag, n, &refchk.fragments, list) {
		list_del(&frag->list);
		kmem_free(frag);
	}
}

/* Cross-reference with the other btrees. */
STATIC void
xchk_refcountbt_xref(
	struct xfs_scrub *sc,
	xfs_agblock_t agbno,
	xfs_extlen_t len,
	xfs_nlink_t refcount)
{
	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

	xchk_xref_is_used_space(sc, agbno, len);
	xchk_xref_is_not_inode_chunk(sc, agbno, len);
	xchk_refcountbt_xref_rmap(sc, agbno, len, refcount);
}

/* Scrub a refcountbt record. */
STATIC int
xchk_refcountbt_rec(
	struct xchk_btree *bs,
	union xfs_btree_rec *rec)
{
	struct xfs_mount *mp = bs->cur->bc_mp;
	xfs_agblock_t *cow_blocks = bs->private;
	xfs_agnumber_t agno = bs->cur->bc_private.a.agno;
	xfs_agblock_t bno;
	xfs_extlen_t len;
	xfs_nlink_t refcount;
	bool has_cowflag;
	int error = 0;

	bno = be32_to_cpu(rec->refc.rc_startblock);
	len = be32_to_cpu(rec->refc.rc_blockcount);
	refcount = be32_to_cpu(rec->refc.rc_refcount);

	/* Only CoW records can have refcount == 1. */
	has_cowflag = (bno & XFS_REFC_COW_START);
	if ((refcount == 1 && !has_cowflag) || (refcount != 1 && has_cowflag))
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
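	/*
	 * Tally CoW staging blocks so they can be cross-checked against
	 * the rmapbt's count of CoW-owned blocks later.
	 */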
	if (has_cowflag)
		(*cow_blocks) += len;

	/* Check the extent. */
	bno &= ~XFS_REFC_COW_START;
	if (bno + len <= bno ||
	    !xfs_verify_agbno(mp, agno, bno) ||
	    !xfs_verify_agbno(mp, agno, bno + len - 1))
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);

	if (refcount == 0)
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);

	xchk_refcountbt_xref(bs->sc, bno, len, refcount);

	return error;
}

/* Make sure we have as many refc blocks as the rmap says. */
STATIC void
xchk_refcount_xref_rmap(
	struct xfs_scrub *sc,
	xfs_filblks_t cow_blocks)
{
	xfs_extlen_t refcbt_blocks = 0;
	xfs_filblks_t blocks;
	int error;

	if (!sc->sa.rmap_cur || xchk_skip_xref(sc->sm))
		return;

	/* Check that we saw as many refcbt blocks as the rmap knows about. */
	error = xfs_btree_count_blocks(sc->sa.refc_cur, &refcbt_blocks);
	if (!xchk_btree_process_error(sc, sc->sa.refc_cur, 0, &error))
		return;
	error = xchk_count_rmap_ownedby_ag(sc, sc->sa.rmap_cur,
			&XFS_RMAP_OINFO_REFC, &blocks);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
		return;
	if (blocks != refcbt_blocks)
		xchk_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);

	/* Check that we saw as many cow blocks as the rmap knows about. */
	error = xchk_count_rmap_ownedby_ag(sc, sc->sa.rmap_cur,
			&XFS_RMAP_OINFO_COW, &blocks);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
		return;
	if (blocks != cow_blocks)
		xchk_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);
}

/* Scrub the refcount btree for some AG. */
int
xchk_refcountbt(
	struct xfs_scrub *sc)
{
	xfs_agblock_t cow_blocks = 0;
	int error;

	error = xchk_btree(sc, sc->sa.refc_cur, xchk_refcountbt_rec,
			&XFS_RMAP_OINFO_REFC, &cow_blocks);
	if (error)
		return error;

	xchk_refcount_xref_rmap(sc, cow_blocks);

	return 0;
}

/* xref check that a cow staging extent is marked in the refcountbt. */
void
xchk_xref_is_cow_staging(
	struct xfs_scrub *sc,
	xfs_agblock_t agbno,
	xfs_extlen_t len)
{
	struct xfs_refcount_irec rc;
	bool has_cowflag;
	int has_refcount;
	int error;

	if (!sc->sa.refc_cur || xchk_skip_xref(sc->sm))
		return;

	/*
	 * Find the CoW staging extent. CoW staging records are stored in
	 * the refcount btree with XFS_REFC_COW_START set on the start
	 * block, so bias the lookup key by that amount to match.
	 */
	error = xfs_refcount_lookup_le(sc->sa.refc_cur,
			agbno + XFS_REFC_COW_START, &has_refcount);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.refc_cur))
		return;
	if (!has_refcount) {
		xchk_btree_xref_set_corrupt(sc, sc->sa.refc_cur, 0);
		return;
	}

	error = xfs_refcount_get_rec(sc->sa.refc_cur, &rc, &has_refcount);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.refc_cur))
		return;
	if (!has_refcount) {
		xchk_btree_xref_set_corrupt(sc, sc->sa.refc_cur, 0);
		return;
	}

	/* CoW flag must be set, refcount must be 1. */
	has_cowflag = (rc.rc_startblock & XFS_REFC_COW_START);
	if (!has_cowflag || rc.rc_refcount != 1)
		xchk_btree_xref_set_corrupt(sc, sc->sa.refc_cur, 0);

	/* Must be at least as long as what was passed in */
	if (rc.rc_blockcount < len)
		xchk_btree_xref_set_corrupt(sc, sc->sa.refc_cur, 0);
}

/*
 * xref check that the extent is not shared. Only file data blocks
 * can have multiple owners.
 */
void
xchk_xref_is_not_shared(
	struct xfs_scrub *sc,
	xfs_agblock_t agbno,
	xfs_extlen_t len)
{
	bool shared;
	int error;

	if (!sc->sa.refc_cur || xchk_skip_xref(sc->sm))
		return;

	error = xfs_refcount_has_record(sc->sa.refc_cur, agbno, len, &shared);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.refc_cur))
		return;
	if (shared)
		xchk_btree_xref_set_corrupt(sc, sc->sa.refc_cur, 0);
}