// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2018 Oracle. All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_btree.h"
#include "scrub/bitmap.h"

/*
 * Set a range of this bitmap. Caller must ensure the range is not set.
 *
 * This is the logical equivalent of bitmap |= mask(start, len).
 */
int
xbitmap_set(
	struct xbitmap		*bitmap,
	uint64_t		start,
	uint64_t		len)
{
	struct xbitmap_range	*bmr;

	bmr = kmem_alloc(sizeof(struct xbitmap_range), KM_MAYFAIL);
	if (!bmr)
		return -ENOMEM;

	INIT_LIST_HEAD(&bmr->list);
	bmr->start = start;
	bmr->len = len;
	list_add_tail(&bmr->list, &bitmap->list);

	return 0;
}
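
/*
 * A minimal usage sketch (not part of the original file; the helper name is
 * hypothetical): initialize a bitmap, record two disjoint ranges with
 * xbitmap_set(), and tear everything down with xbitmap_destroy().  Only the
 * xbitmap helpers declared in scrub/bitmap.h are used.
 */
static int __maybe_unused
xbitmap_set_sketch(void)
{
	struct xbitmap		bitmap;
	int			error;

	xbitmap_init(&bitmap);

	/* bitmap |= mask(10, 4), i.e. blocks 10-13. */
	error = xbitmap_set(&bitmap, 10, 4);
	if (!error) {
		/* bitmap |= mask(40, 2), i.e. blocks 40-41. */
		error = xbitmap_set(&bitmap, 40, 2);
	}

	xbitmap_destroy(&bitmap);
	return error;
}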

/* Free everything related to this bitmap. */
void
xbitmap_destroy(
	struct xbitmap		*bitmap)
{
	struct xbitmap_range	*bmr;
	struct xbitmap_range	*n;

	for_each_xbitmap_extent(bmr, n, bitmap) {
		list_del(&bmr->list);
		kmem_free(bmr);
	}
}

/* Set up a per-AG block bitmap. */
void
xbitmap_init(
	struct xbitmap		*bitmap)
{
	INIT_LIST_HEAD(&bitmap->list);
}

/* Compare two bitmap ranges. */
static int
xbitmap_range_cmp(
	void			*priv,
	const struct list_head	*a,
	const struct list_head	*b)
{
	struct xbitmap_range	*ap;
	struct xbitmap_range	*bp;

	ap = container_of(a, struct xbitmap_range, list);
	bp = container_of(b, struct xbitmap_range, list);

	if (ap->start > bp->start)
		return 1;
	if (ap->start < bp->start)
		return -1;
	return 0;
}

/*
 * Remove all the blocks mentioned in @sub from the extents in @bitmap.
 *
 * The intent is that callers will iterate the rmapbt for all of its records
 * for a given owner to generate @bitmap; and iterate all the blocks of the
 * metadata structures that are not being rebuilt and have the same rmapbt
 * owner to generate @sub. This routine subtracts all the extents mentioned
 * in @sub from all the extents linked in @bitmap, which leaves @bitmap as
 * the list of blocks that are not accounted for; we assume these are the
 * dead blocks of the old metadata structure. The blocks mentioned in
 * @bitmap can then be reaped. (A worked example sketch follows the
 * function below.)
 *
 * This is the logical equivalent of bitmap &= ~sub.
 */
#define LEFT_ALIGNED	(1 << 0)
#define RIGHT_ALIGNED	(1 << 1)
int
xbitmap_disunion(
	struct xbitmap		*bitmap,
	struct xbitmap		*sub)
{
	struct list_head	*lp;
	struct xbitmap_range	*br;
	struct xbitmap_range	*new_br;
	struct xbitmap_range	*sub_br;
	uint64_t		sub_start;
	uint64_t		sub_len;
	int			state;
	int			error = 0;

	if (list_empty(&bitmap->list) || list_empty(&sub->list))
		return 0;
	ASSERT(!list_empty(&sub->list));

	list_sort(NULL, &bitmap->list, xbitmap_range_cmp);
	list_sort(NULL, &sub->list, xbitmap_range_cmp);

	/*
	 * Now that we've sorted both lists, we iterate bitmap once, rolling
	 * forward through sub and/or bitmap as necessary until we find an
	 * overlap or reach the end of either list. We do not reset lp to the
	 * head of bitmap nor do we reset sub_br to the head of sub. The
	 * list traversal is similar to merge sort, but we're deleting
	 * instead. In this manner we avoid O(n^2) operations.
	 */
	sub_br = list_first_entry(&sub->list, struct xbitmap_range,
			list);
	lp = bitmap->list.next;
	while (lp != &bitmap->list) {
		br = list_entry(lp, struct xbitmap_range, list);

		/*
		 * Advance sub_br and/or br until we find a pair that
		 * intersect or we run out of extents.
		 */
		while (sub_br->start + sub_br->len <= br->start) {
			if (list_is_last(&sub_br->list, &sub->list))
				goto out;
			sub_br = list_next_entry(sub_br, list);
		}
		if (sub_br->start >= br->start + br->len) {
			lp = lp->next;
			continue;
		}

		/* trim sub_br to fit the extent we have */
		sub_start = sub_br->start;
		sub_len = sub_br->len;
		if (sub_br->start < br->start) {
			sub_len -= br->start - sub_br->start;
			sub_start = br->start;
		}
		if (sub_len > br->len)
			sub_len = br->len;

		state = 0;
		if (sub_start == br->start)
			state |= LEFT_ALIGNED;
		if (sub_start + sub_len == br->start + br->len)
			state |= RIGHT_ALIGNED;
		switch (state) {
		case LEFT_ALIGNED:
			/* Coincides with only the left. */
			br->start += sub_len;
			br->len -= sub_len;
			break;
		case RIGHT_ALIGNED:
			/* Coincides with only the right. */
			br->len -= sub_len;
			lp = lp->next;
			break;
		case LEFT_ALIGNED | RIGHT_ALIGNED:
			/* Total overlap, just delete br. */
			lp = lp->next;
			list_del(&br->list);
			kmem_free(br);
			break;
		case 0:
			/*
			 * Deleting from the middle: add the new right extent
			 * and then shrink the left extent.
			 */
			new_br = kmem_alloc(sizeof(struct xbitmap_range),
					KM_MAYFAIL);
			if (!new_br) {
				error = -ENOMEM;
				goto out;
			}
			INIT_LIST_HEAD(&new_br->list);
			new_br->start = sub_start + sub_len;
			new_br->len = br->start + br->len - new_br->start;
			list_add(&new_br->list, &br->list);
			br->len = sub_start - br->start;
			lp = lp->next;
			break;
		default:
			ASSERT(0);
			break;
		}
	}

out:
	return error;
}
#undef LEFT_ALIGNED
#undef RIGHT_ALIGNED
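
/*
 * A worked example of the subtraction above (an illustrative sketch, not
 * part of the original file; the helper name is hypothetical).  @bitmap
 * starts as a single extent covering blocks 16-23 and @sub covers blocks
 * 20-21, so the "deleting from the middle" case splits the extent and
 * @bitmap ends up covering blocks 16-19 and 22-23.
 */
static int __maybe_unused
xbitmap_disunion_sketch(void)
{
	struct xbitmap		bitmap;
	struct xbitmap		sub;
	int			error;

	xbitmap_init(&bitmap);
	xbitmap_init(&sub);

	error = xbitmap_set(&bitmap, 16, 8);	/* blocks 16-23 */
	if (!error)
		error = xbitmap_set(&sub, 20, 2);	/* blocks 20-21 */
	if (!error)
		error = xbitmap_disunion(&bitmap, &sub);

	/* Six blocks remain: 16-19 and 22-23. */
	if (!error && xbitmap_hweight(&bitmap) != 6)
		error = -EFSCORRUPTED;

	xbitmap_destroy(&sub);
	xbitmap_destroy(&bitmap);
	return error;
}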

/*
 * Record all btree blocks seen while iterating all records of a btree.
 *
 * We know that the btree query_all function starts at the left edge and walks
 * towards the right edge of the tree. Therefore, we know that we can walk up
 * the btree cursor towards the root; if the pointer for a given level points
 * to the first record/key in that block, we haven't seen this block before;
 * and therefore we need to remember that we saw this block in the btree.
 *
 * So if our btree is:
 *
 *    4
 *  / | \
 * 1  2  3
 *
 * Pretend for this example that each leaf block has 100 btree records. For
 * the first btree record, we'll observe that bc_levels[0].ptr == 1, so we
 * record that we saw block 1. Then we observe that bc_levels[1].ptr == 1, so
 * we record block 4. The list is [1, 4].
 *
 * For the second btree record, we see that bc_levels[0].ptr == 2, so we exit
 * the loop. The list remains [1, 4].
 *
 * For the 101st btree record, we've moved onto leaf block 2. Now
 * bc_levels[0].ptr == 1 again, so we record that we saw block 2. We see that
 * bc_levels[1].ptr == 2, so we exit the loop. The list is now [1, 4, 2].
 *
 * For the 102nd record, bc_levels[0].ptr == 2, so we continue.
 *
 * For the 201st record, we've moved on to leaf block 3.
 * bc_levels[0].ptr == 1, so we add 3 to the list. Now it is [1, 4, 2, 3].
 *
 * For the 300th record we just exit, with the list being [1, 4, 2, 3].
 */

/*
 * Record all the buffers pointed to by the btree cursor. Callers already
 * engaged in a btree walk should call this function to capture the list of
 * blocks going from the leaf towards the root.
 */
int
xbitmap_set_btcur_path(
	struct xbitmap		*bitmap,
	struct xfs_btree_cur	*cur)
{
	struct xfs_buf		*bp;
	xfs_fsblock_t		fsb;
	int			i;
	int			error;

	for (i = 0; i < cur->bc_nlevels && cur->bc_levels[i].ptr == 1; i++) {
		xfs_btree_get_block(cur, i, &bp);
		if (!bp)
			continue;
		fsb = XFS_DADDR_TO_FSB(cur->bc_mp, xfs_buf_daddr(bp));
		error = xbitmap_set(bitmap, fsb, 1);
		if (error)
			return error;
	}

	return 0;
}
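
/*
 * A hypothetical caller sketch (not part of the original file): a per-record
 * callback handed to xfs_btree_query_all() can call xbitmap_set_btcur_path()
 * to accumulate every btree block visited during the walk, as described in
 * the comment above.  The callback signature is assumed to match
 * xfs_btree_query_range_fn; the walk would then look like
 * xfs_btree_query_all(cur, xbitmap_record_path_sketch, &bitmap).
 */
static int __maybe_unused
xbitmap_record_path_sketch(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_rec	*rec,
	void				*priv)
{
	struct xbitmap			*bitmap = priv;

	return xbitmap_set_btcur_path(bitmap, cur);
}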

/* Collect a btree's block in the bitmap. */
STATIC int
xbitmap_collect_btblock(
	struct xfs_btree_cur	*cur,
	int			level,
	void			*priv)
{
	struct xbitmap		*bitmap = priv;
	struct xfs_buf		*bp;
	xfs_fsblock_t		fsbno;

	xfs_btree_get_block(cur, level, &bp);
	if (!bp)
		return 0;

	fsbno = XFS_DADDR_TO_FSB(cur->bc_mp, xfs_buf_daddr(bp));
	return xbitmap_set(bitmap, fsbno, 1);
}

/* Walk the btree and mark the bitmap wherever a btree block is found. */
int
xbitmap_set_btblocks(
	struct xbitmap		*bitmap,
	struct xfs_btree_cur	*cur)
{
	return xfs_btree_visit_blocks(cur, xbitmap_collect_btblock,
			XFS_BTREE_VISIT_ALL, bitmap);
}
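
/*
 * A sketch of the reaping workflow described above xbitmap_disunion() (an
 * illustration, not part of the original file; the helper name and its
 * arguments are hypothetical).  @all_owner_blocks would already contain
 * every block the rmapbt attributes to the owner being repaired, and
 * @kept_cur is a cursor for a btree with the same rmapbt owner that is not
 * being rebuilt.  Whatever remains in @all_owner_blocks afterwards can be
 * reaped.
 */
static int __maybe_unused
xbitmap_find_reapable_sketch(
	struct xbitmap		*all_owner_blocks,
	struct xfs_btree_cur	*kept_cur)
{
	struct xbitmap		in_use;
	int			error;

	xbitmap_init(&in_use);

	/* Mark every block of the btree we're keeping as still in use. */
	error = xbitmap_set_btblocks(&in_use, kept_cur);
	if (error)
		goto out;

	/* all_owner_blocks &= ~in_use; the leftovers are unreferenced. */
	error = xbitmap_disunion(all_owner_blocks, &in_use);
out:
	xbitmap_destroy(&in_use);
	return error;
}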

/* How many bits are set in this bitmap? */
uint64_t
xbitmap_hweight(
	struct xbitmap		*bitmap)
{
	struct xbitmap_range	*bmr;
	struct xbitmap_range	*n;
	uint64_t		ret = 0;

	for_each_xbitmap_extent(bmr, n, bitmap)
		ret += bmr->len;

	return ret;
}