// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2019 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_ialloc.h"
#include "xfs_ialloc_btree.h"
#include "xfs_iwalk.h"
#include "xfs_error.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_health.h"
#include "xfs_trans.h"
#include "xfs_pwork.h"
#include "xfs_ag.h"

/*
 * Walking Inodes in the Filesystem
 * ================================
 *
 * This iterator function walks a subset of filesystem inodes in increasing
 * order from @startino until there are no more inodes.  For each allocated
 * inode it finds, it calls a walk function with the relevant inode number and
 * a pointer to caller-provided data.  The walk function can return the usual
 * negative error code to stop the iteration; 0 to continue the iteration; or
 * -ECANCELED to stop the iteration.  This return value is returned to the
 * caller.
 *
 * Internally, we allow the walk function to do anything, which means that we
 * cannot maintain the inobt cursor or our lock on the AGI buffer.  We
 * therefore cache the inobt records in kernel memory and only call the walk
 * function when our memory buffer is full.  @nr_recs is the number of records
 * that we've cached, and @sz_recs is the size of our cache.
 *
 * It is the responsibility of the walk function to ensure it accesses
 * allocated inodes, as the inobt records may be stale by the time they are
 * acted upon.
 */

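/*
 * Illustrative sketch (not part of the original file): a walk function that
 * follows the protocol above might look like the hypothetical example below.
 * Returning 0 continues the walk, -ECANCELED stops it early, and any other
 * negative errno stops the walk and is passed back to the caller.  The
 * function name and counter are made up; only the xfs_iwalk_fn signature is
 * taken from xfs_iwalk.h.
 *
 *	static int
 *	xfs_example_count_fn(
 *		struct xfs_mount        *mp,
 *		struct xfs_trans        *tp,
 *		xfs_ino_t               ino,
 *		void                    *data)
 *	{
 *		uint64_t                *count = data;
 *
 *		(*count)++;
 *		return 0;
 *	}
 */
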
struct xfs_iwalk_ag {
        /* parallel work control data; will be null if single threaded */
        struct xfs_pwork                pwork;

        struct xfs_mount                *mp;
        struct xfs_trans                *tp;
        struct xfs_perag                *pag;

        /* Where do we start the traversal? */
        xfs_ino_t                       startino;

        /* What was the last inode number we saw when iterating the inobt? */
        xfs_ino_t                       lastino;

        /* Array of inobt records we cache. */
        struct xfs_inobt_rec_incore     *recs;

        /* Number of entries allocated for the @recs array. */
        unsigned int                    sz_recs;

        /* Number of entries in the @recs array that are in use. */
        unsigned int                    nr_recs;

        /* Inode walk function and data pointer. */
        xfs_iwalk_fn                    iwalk_fn;
        xfs_inobt_walk_fn               inobt_walk_fn;
        void                            *data;

        /*
         * Make it look like the inodes up to startino are free so that
         * bulkstat can start its inode iteration at the correct place without
         * needing to special case everywhere.
         */
        unsigned int                    trim_start:1;

        /* Skip empty inobt records? */
        unsigned int                    skip_empty:1;

        /* Drop the (hopefully empty) transaction when calling iwalk_fn. */
        unsigned int                    drop_trans:1;
};

/*
 * Loop over all clusters in a chunk for a given incore inode allocation btree
 * record.  Do a readahead if there are any allocated inodes in that cluster.
 */
STATIC void
xfs_iwalk_ichunk_ra(
        struct xfs_mount                *mp,
        struct xfs_perag                *pag,
        struct xfs_inobt_rec_incore     *irec)
{
        struct xfs_ino_geometry         *igeo = M_IGEO(mp);
        xfs_agblock_t                   agbno;
        struct blk_plug                 plug;
        int                             i;      /* inode chunk index */

        agbno = XFS_AGINO_TO_AGBNO(mp, irec->ir_startino);

        blk_start_plug(&plug);
        for (i = 0; i < XFS_INODES_PER_CHUNK; i += igeo->inodes_per_cluster) {
                xfs_inofree_t   imask;

                imask = xfs_inobt_maskn(i, igeo->inodes_per_cluster);
                if (imask & ~irec->ir_free) {
                        xfs_btree_reada_bufs(mp, pag->pag_agno, agbno,
                                        igeo->blocks_per_cluster,
                                        &xfs_inode_buf_ops);
                }
                agbno += igeo->blocks_per_cluster;
        }
        blk_finish_plug(&plug);
}

/*
 * Set the bits in @irec's free mask that correspond to the inodes before
 * @agino so that we skip them.  This is how we restart an inode walk that was
 * interrupted in the middle of an inode record.
 */
STATIC void
xfs_iwalk_adjust_start(
        xfs_agino_t                     agino,  /* starting inode of chunk */
        struct xfs_inobt_rec_incore     *irec)  /* btree record */
{
        int                             idx;    /* index into inode chunk */
        int                             i;

        idx = agino - irec->ir_startino;

        /*
         * We found the chunk record containing @agino, and some of the inodes
         * before @agino in that chunk are allocated.  Mark those
         * uninteresting inodes free because they're before our start point.
         */
        for (i = 0; i < idx; i++) {
                if (XFS_INOBT_MASK(i) & ~irec->ir_free)
                        irec->ir_freecount++;
        }

        irec->ir_free |= xfs_inobt_maskn(0, idx);
}
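
/*
 * Worked example (illustrative, not from the original source): if @agino
 * lands five inodes into a 64-inode chunk, idx is 5; each of inodes 0-4 that
 * was allocated bumps ir_freecount, and xfs_inobt_maskn(0, 5) ORs bits 0-4
 * into ir_free, so the restarted walk reports only inodes at or after @agino.
 */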

/* Allocate memory for a walk. */
STATIC int
xfs_iwalk_alloc(
        struct xfs_iwalk_ag     *iwag)
{
        size_t                  size;

        ASSERT(iwag->recs == NULL);
        iwag->nr_recs = 0;

        /* Allocate a prefetch buffer for inobt records. */
        size = iwag->sz_recs * sizeof(struct xfs_inobt_rec_incore);
        iwag->recs = kmem_alloc(size, KM_MAYFAIL);
        if (iwag->recs == NULL)
                return -ENOMEM;

        return 0;
}

/* Free memory we allocated for a walk. */
STATIC void
xfs_iwalk_free(
        struct xfs_iwalk_ag     *iwag)
{
        kmem_free(iwag->recs);
        iwag->recs = NULL;
}

/* For each inuse inode in each cached inobt record, call our function. */
STATIC int
xfs_iwalk_ag_recs(
        struct xfs_iwalk_ag     *iwag)
{
        struct xfs_mount        *mp = iwag->mp;
        struct xfs_trans        *tp = iwag->tp;
        struct xfs_perag        *pag = iwag->pag;
        xfs_ino_t               ino;
        unsigned int            i, j;
        int                     error;

        for (i = 0; i < iwag->nr_recs; i++) {
                struct xfs_inobt_rec_incore     *irec = &iwag->recs[i];

                trace_xfs_iwalk_ag_rec(mp, pag->pag_agno, irec);

                if (xfs_pwork_want_abort(&iwag->pwork))
                        return 0;

                if (iwag->inobt_walk_fn) {
                        error = iwag->inobt_walk_fn(mp, tp, pag->pag_agno, irec,
                                        iwag->data);
                        if (error)
                                return error;
                }

                if (!iwag->iwalk_fn)
                        continue;

                for (j = 0; j < XFS_INODES_PER_CHUNK; j++) {
                        if (xfs_pwork_want_abort(&iwag->pwork))
                                return 0;

                        /* Skip if this inode is free */
                        if (XFS_INOBT_MASK(j) & irec->ir_free)
                                continue;

                        /* Otherwise call our function. */
                        ino = XFS_AGINO_TO_INO(mp, pag->pag_agno,
                                                irec->ir_startino + j);
                        error = iwag->iwalk_fn(mp, tp, ino, iwag->data);
                        if (error)
                                return error;
                }
        }

        return 0;
}

/* Delete cursor and let go of AGI. */
static inline void
xfs_iwalk_del_inobt(
        struct xfs_trans        *tp,
        struct xfs_btree_cur    **curpp,
        struct xfs_buf          **agi_bpp,
        int                     error)
{
        if (*curpp) {
                xfs_btree_del_cursor(*curpp, error);
                *curpp = NULL;
        }
        if (*agi_bpp) {
                xfs_trans_brelse(tp, *agi_bpp);
                *agi_bpp = NULL;
        }
}

/*
 * Set ourselves up for walking inobt records starting from a given point in
 * the filesystem.
 *
 * If caller passed in a nonzero start inode number, load the record from the
 * inobt and make the record look like all the inodes before agino are free so
 * that we skip them, and then move the cursor to the next inobt record.  This
 * is how we support starting an iwalk in the middle of an inode chunk.
 *
 * If the caller passed in a start number of zero, move the cursor to the first
 * inobt record.
 *
 * The caller is responsible for cleaning up the cursor and buffer pointer
 * regardless of the error status.
 */
STATIC int
xfs_iwalk_ag_start(
        struct xfs_iwalk_ag     *iwag,
        xfs_agino_t             agino,
        struct xfs_btree_cur    **curpp,
        struct xfs_buf          **agi_bpp,
        int                     *has_more)
{
        struct xfs_mount        *mp = iwag->mp;
        struct xfs_trans        *tp = iwag->tp;
        struct xfs_perag        *pag = iwag->pag;
        struct xfs_inobt_rec_incore     *irec;
        int                     error;

        /* Set up a fresh cursor and empty the inobt cache. */
        iwag->nr_recs = 0;
        error = xfs_inobt_cur(mp, tp, pag, XFS_BTNUM_INO, curpp, agi_bpp);
        if (error)
                return error;

        /* Starting at the beginning of the AG?  That's easy! */
        if (agino == 0)
                return xfs_inobt_lookup(*curpp, 0, XFS_LOOKUP_GE, has_more);

        /*
         * Otherwise, we have to grab the inobt record where we left off, stuff
         * the record into our cache, and then see if there are more records.
         * We require a lookup cache of at least two elements so that the
         * caller doesn't have to deal with tearing down the cursor to walk the
         * records.
         */
        error = xfs_inobt_lookup(*curpp, agino, XFS_LOOKUP_LE, has_more);
        if (error)
                return error;

        /*
         * If the LE lookup at @agino yields no records, jump ahead to the
         * inobt cursor increment to see if there are more records to process.
         */
        if (!*has_more)
                goto out_advance;

        /* Get the record, should always work */
        irec = &iwag->recs[iwag->nr_recs];
        error = xfs_inobt_get_rec(*curpp, irec, has_more);
        if (error)
                return error;
        if (XFS_IS_CORRUPT(mp, *has_more != 1))
                return -EFSCORRUPTED;

        iwag->lastino = XFS_AGINO_TO_INO(mp, pag->pag_agno,
                                irec->ir_startino + XFS_INODES_PER_CHUNK - 1);

        /*
         * If the LE lookup yielded an inobt record before the cursor position,
         * skip it and see if there's another one after it.
         */
        if (irec->ir_startino + XFS_INODES_PER_CHUNK <= agino)
                goto out_advance;

        /*
         * If agino fell in the middle of the inode record, make it look like
         * the inodes up to agino are free so that we don't return them again.
         */
        if (iwag->trim_start)
                xfs_iwalk_adjust_start(agino, irec);

        /*
         * The prefetch calculation is supposed to give us a large enough inobt
         * record cache that this startup code can stage a partial first
         * record and the loop body can cache a record without having to check
         * for cache space until after it reads an inobt record.
         */
        iwag->nr_recs++;
        ASSERT(iwag->nr_recs < iwag->sz_recs);

out_advance:
        return xfs_btree_increment(*curpp, 0, has_more);
}

/*
 * The inobt record cache is full, so preserve the inobt cursor state and
 * run callbacks on the cached inobt records.  When we're done, restore the
 * cursor state to wherever the cursor would have been had the cache not been
 * full (and therefore we could've just incremented the cursor) if *@has_more
 * is true.  On exit, *@has_more will indicate whether or not the caller should
 * try for more inode records.
 */
STATIC int
xfs_iwalk_run_callbacks(
        struct xfs_iwalk_ag             *iwag,
        struct xfs_btree_cur            **curpp,
        struct xfs_buf                  **agi_bpp,
        int                             *has_more)
{
        struct xfs_mount                *mp = iwag->mp;
        struct xfs_inobt_rec_incore     *irec;
        xfs_agino_t                     next_agino;
        int                             error;

        next_agino = XFS_INO_TO_AGINO(mp, iwag->lastino) + 1;

        ASSERT(iwag->nr_recs > 0);

        /* Delete cursor but remember the last record we cached... */
        xfs_iwalk_del_inobt(iwag->tp, curpp, agi_bpp, 0);
        irec = &iwag->recs[iwag->nr_recs - 1];
        ASSERT(next_agino >= irec->ir_startino + XFS_INODES_PER_CHUNK);

        if (iwag->drop_trans) {
                xfs_trans_cancel(iwag->tp);
                iwag->tp = NULL;
        }

        error = xfs_iwalk_ag_recs(iwag);
        if (error)
                return error;

        /* ...empty the cache... */
        iwag->nr_recs = 0;

        if (!*has_more)
                return 0;

        if (iwag->drop_trans) {
                error = xfs_trans_alloc_empty(mp, &iwag->tp);
                if (error)
                        return error;
        }

        /* ...and recreate the cursor just past where we left off. */
        error = xfs_inobt_cur(mp, iwag->tp, iwag->pag, XFS_BTNUM_INO, curpp,
                        agi_bpp);
        if (error)
                return error;

        return xfs_inobt_lookup(*curpp, next_agino, XFS_LOOKUP_GE, has_more);
}

/* Walk all inodes in a single AG, from @iwag->startino to the end of the AG. */
STATIC int
xfs_iwalk_ag(
        struct xfs_iwalk_ag             *iwag)
{
        struct xfs_mount                *mp = iwag->mp;
        struct xfs_perag                *pag = iwag->pag;
        struct xfs_buf                  *agi_bp = NULL;
        struct xfs_btree_cur            *cur = NULL;
        xfs_agino_t                     agino;
        int                             has_more;
        int                             error = 0;

        /* Set up our cursor at the right place in the inode btree. */
        ASSERT(pag->pag_agno == XFS_INO_TO_AGNO(mp, iwag->startino));
        agino = XFS_INO_TO_AGINO(mp, iwag->startino);
        error = xfs_iwalk_ag_start(iwag, agino, &cur, &agi_bp, &has_more);

        while (!error && has_more) {
                struct xfs_inobt_rec_incore     *irec;
                xfs_ino_t                       rec_fsino;

                cond_resched();
                if (xfs_pwork_want_abort(&iwag->pwork))
                        goto out;

                /* Fetch the inobt record. */
                irec = &iwag->recs[iwag->nr_recs];
                error = xfs_inobt_get_rec(cur, irec, &has_more);
                if (error || !has_more)
                        break;

                /* Make sure that we always move forward. */
                rec_fsino = XFS_AGINO_TO_INO(mp, pag->pag_agno, irec->ir_startino);
                if (iwag->lastino != NULLFSINO &&
                    XFS_IS_CORRUPT(mp, iwag->lastino >= rec_fsino)) {
                        error = -EFSCORRUPTED;
                        goto out;
                }
                iwag->lastino = rec_fsino + XFS_INODES_PER_CHUNK - 1;

                /* No allocated inodes in this chunk; skip it. */
                if (iwag->skip_empty && irec->ir_freecount == irec->ir_count) {
                        error = xfs_btree_increment(cur, 0, &has_more);
                        if (error)
                                break;
                        continue;
                }

                /*
                 * Start readahead for this inode chunk in anticipation of
                 * walking the inodes.
                 */
                if (iwag->iwalk_fn)
                        xfs_iwalk_ichunk_ra(mp, pag, irec);

                /*
                 * If there's space in the buffer for more records, increment
                 * the btree cursor and grab more.
                 */
                if (++iwag->nr_recs < iwag->sz_recs) {
                        error = xfs_btree_increment(cur, 0, &has_more);
                        if (error || !has_more)
                                break;
                        continue;
                }

                /*
                 * Otherwise, we need to save cursor state and run the callback
                 * function on the cached records.  The run_callbacks function
                 * is supposed to return a cursor pointing to the record where
                 * we would be if we had been able to increment like above.
                 */
                ASSERT(has_more);
                error = xfs_iwalk_run_callbacks(iwag, &cur, &agi_bp, &has_more);
        }

        if (iwag->nr_recs == 0 || error)
                goto out;

        /* Walk the unprocessed records in the cache. */
        error = xfs_iwalk_run_callbacks(iwag, &cur, &agi_bp, &has_more);

out:
        xfs_iwalk_del_inobt(iwag->tp, &cur, &agi_bp, error);
        return error;
}

/*
 * We experimentally determined that the reduction in ioctl call overhead
 * diminishes when userspace asks for more than 2048 inodes, so we'll cap
 * prefetch at this point.
 */
#define IWALK_MAX_INODE_PREFETCH        (2048U)

/*
 * Given the number of inodes to prefetch, set the number of inobt records that
 * we cache in memory, which controls the number of inodes we try to read
 * ahead.  Set the maximum if @inodes == 0.
 */
static inline unsigned int
xfs_iwalk_prefetch(
        unsigned int            inodes)
{
        unsigned int            inobt_records;

        /*
         * If the caller didn't tell us the number of inodes they wanted,
         * assume the maximum prefetch possible for best performance.
         * Otherwise, cap prefetch at that maximum so that we don't start an
         * absurd amount of prefetch.
         */
        if (inodes == 0)
                inodes = IWALK_MAX_INODE_PREFETCH;
        inodes = min(inodes, IWALK_MAX_INODE_PREFETCH);

        /* Round the inode count up to a full chunk. */
        inodes = round_up(inodes, XFS_INODES_PER_CHUNK);

        /*
         * In order to convert the number of inodes to prefetch into an
         * estimate of the number of inobt records to cache, we require a
         * conversion factor that reflects our expectations of the average
         * loading factor of an inode chunk.  Based on data gathered, most
         * (but not all) filesystems manage to keep the inode chunks totally
         * full, so we'll underestimate slightly so that our readahead will
         * still deliver the performance we want on aging filesystems:
         *
         * inobt = inodes / (INODES_PER_CHUNK * (4 / 5));
         *
         * The funny math is to avoid integer division.
         */
        inobt_records = (inodes * 5) / (4 * XFS_INODES_PER_CHUNK);

        /*
         * Allocate enough space to prefetch at least two inobt records so that
         * we can cache both the record where the iwalk started and the next
         * record.  This simplifies the AG inode walk loop setup code.
         */
        return max(inobt_records, 2U);
}
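
/*
 * Worked example (illustrative, not from the original source): a caller
 * asking for 1000 inodes is rounded up to 1024, and (1024 * 5) / (4 * 64)
 * yields 20 cached inobt records, or up to 1280 inodes of readahead, which is
 * slightly more than requested per the 4/5 loading-factor estimate above.
 */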

/*
 * Walk all inodes in the filesystem starting from @startino.  The @iwalk_fn
 * will be called for each allocated inode, being passed the inode's number and
 * @data.  @inode_records controls how many inobt records' worth of inodes we
 * try to readahead.
 */
int
xfs_iwalk(
        struct xfs_mount        *mp,
        struct xfs_trans        *tp,
        xfs_ino_t               startino,
        unsigned int            flags,
        xfs_iwalk_fn            iwalk_fn,
        unsigned int            inode_records,
        void                    *data)
{
        struct xfs_iwalk_ag     iwag = {
                .mp             = mp,
                .tp             = tp,
                .iwalk_fn       = iwalk_fn,
                .data           = data,
                .startino       = startino,
                .sz_recs        = xfs_iwalk_prefetch(inode_records),
                .trim_start     = 1,
                .skip_empty     = 1,
                .pwork          = XFS_PWORK_SINGLE_THREADED,
                .lastino        = NULLFSINO,
        };
        struct xfs_perag        *pag;
        xfs_agnumber_t          agno = XFS_INO_TO_AGNO(mp, startino);
        int                     error;

        ASSERT(agno < mp->m_sb.sb_agcount);
        ASSERT(!(flags & ~XFS_IWALK_FLAGS_ALL));

        error = xfs_iwalk_alloc(&iwag);
        if (error)
                return error;

        for_each_perag_from(mp, agno, pag) {
                iwag.pag = pag;
                error = xfs_iwalk_ag(&iwag);
                if (error)
                        break;
                iwag.startino = XFS_AGINO_TO_INO(mp, agno + 1, 0);
                if (flags & XFS_INOBT_WALK_SAME_AG)
                        break;
                iwag.pag = NULL;
        }

        if (iwag.pag)
                xfs_perag_put(pag);
        xfs_iwalk_free(&iwag);
        return error;
}
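
/*
 * Usage sketch (hypothetical caller, reusing the example callback sketched
 * near the top of this file; not part of the original code):
 *
 *	uint64_t        count = 0;
 *	int             error;
 *
 *	error = xfs_iwalk(mp, NULL, 0, 0, xfs_example_count_fn, 0, &count);
 *
 * A zero @startino begins the walk at the first AG, zero @flags walks every
 * AG rather than stopping after one, and zero @inode_records lets the
 * prefetch size default to its maximum.
 */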

/* Run per-thread iwalk work. */
static int
xfs_iwalk_ag_work(
        struct xfs_mount        *mp,
        struct xfs_pwork        *pwork)
{
        struct xfs_iwalk_ag     *iwag;
        int                     error = 0;

        iwag = container_of(pwork, struct xfs_iwalk_ag, pwork);
        if (xfs_pwork_want_abort(pwork))
                goto out;

        error = xfs_iwalk_alloc(iwag);
        if (error)
                goto out;
        /*
         * Grab an empty transaction so that we can use its recursive buffer
         * locking abilities to detect cycles in the inobt without deadlocking.
         */
        error = xfs_trans_alloc_empty(mp, &iwag->tp);
        if (error)
                goto out;
        iwag->drop_trans = 1;

        error = xfs_iwalk_ag(iwag);
        if (iwag->tp)
                xfs_trans_cancel(iwag->tp);
        xfs_iwalk_free(iwag);
out:
        xfs_perag_put(iwag->pag);
        kmem_free(iwag);
        return error;
}

/*
 * Walk all the inodes in the filesystem using multiple threads to process each
 * AG.
 */
int
xfs_iwalk_threaded(
        struct xfs_mount        *mp,
        xfs_ino_t               startino,
        unsigned int            flags,
        xfs_iwalk_fn            iwalk_fn,
        unsigned int            inode_records,
        bool                    polled,
        void                    *data)
{
        struct xfs_pwork_ctl    pctl;
        struct xfs_perag        *pag;
        xfs_agnumber_t          agno = XFS_INO_TO_AGNO(mp, startino);
        int                     error;

        ASSERT(agno < mp->m_sb.sb_agcount);
        ASSERT(!(flags & ~XFS_IWALK_FLAGS_ALL));

        error = xfs_pwork_init(mp, &pctl, xfs_iwalk_ag_work, "xfs_iwalk");
        if (error)
                return error;

        for_each_perag_from(mp, agno, pag) {
                struct xfs_iwalk_ag     *iwag;

                if (xfs_pwork_ctl_want_abort(&pctl))
                        break;

                iwag = kmem_zalloc(sizeof(struct xfs_iwalk_ag), 0);
                iwag->mp = mp;

                /*
                 * perag is being handed off to async work, so take another
                 * reference for the async work to release.
                 */
                atomic_inc(&pag->pag_ref);
                iwag->pag = pag;
                iwag->iwalk_fn = iwalk_fn;
                iwag->data = data;
                iwag->startino = startino;
                iwag->sz_recs = xfs_iwalk_prefetch(inode_records);
                iwag->lastino = NULLFSINO;
                xfs_pwork_queue(&pctl, &iwag->pwork);
                startino = XFS_AGINO_TO_INO(mp, pag->pag_agno + 1, 0);
                if (flags & XFS_INOBT_WALK_SAME_AG)
                        break;
        }
        if (pag)
                xfs_perag_put(pag);
        if (polled)
                xfs_pwork_poll(&pctl);
        return xfs_pwork_destroy(&pctl);
}

/*
 * Allow callers to cache up to a page's worth of inobt records.  This reflects
 * the existing inumbers prefetching behavior.  Since the inobt walk does not
 * itself do anything with the inobt records, we can set a fairly high limit
 * here.
 */
#define MAX_INOBT_WALK_PREFETCH \
        (PAGE_SIZE / sizeof(struct xfs_inobt_rec_incore))

/*
 * Given the number of records that the user wanted, set the number of inobt
 * records that we buffer in memory.  Set the maximum if @inobt_records == 0.
 */
static inline unsigned int
xfs_inobt_walk_prefetch(
        unsigned int            inobt_records)
{
        /*
         * If the caller didn't tell us the number of inobt records they
         * wanted, assume the maximum prefetch possible for best performance.
         */
        if (inobt_records == 0)
                inobt_records = MAX_INOBT_WALK_PREFETCH;

        /*
         * Allocate enough space to prefetch at least two inobt records so that
         * we can cache both the record where the iwalk started and the next
         * record.  This simplifies the AG inode walk loop setup code.
         */
        inobt_records = max(inobt_records, 2U);

        /*
         * Cap prefetch at that maximum so that we don't use an absurd amount
         * of memory.
         */
        return min_t(unsigned int, inobt_records, MAX_INOBT_WALK_PREFETCH);
}
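
/*
 * Rough illustration (an assumption, not from the original source): with a
 * 4096-byte page and a 16-byte struct xfs_inobt_rec_incore, the cap above
 * works out to roughly 256 cached records, covering about 16384 inodes'
 * worth of btree data per walk.
 */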

/*
 * Walk all inode btree records in the filesystem starting from @startino.  The
 * @inobt_walk_fn will be called for each btree record, being passed the incore
 * record and @data.  @inobt_records controls how many inobt records we try to
 * cache ahead of time.
 */
int
xfs_inobt_walk(
        struct xfs_mount        *mp,
        struct xfs_trans        *tp,
        xfs_ino_t               startino,
        unsigned int            flags,
        xfs_inobt_walk_fn       inobt_walk_fn,
        unsigned int            inobt_records,
        void                    *data)
{
        struct xfs_iwalk_ag     iwag = {
                .mp             = mp,
                .tp             = tp,
                .inobt_walk_fn  = inobt_walk_fn,
                .data           = data,
                .startino       = startino,
                .sz_recs        = xfs_inobt_walk_prefetch(inobt_records),
                .pwork          = XFS_PWORK_SINGLE_THREADED,
                .lastino        = NULLFSINO,
        };
        struct xfs_perag        *pag;
        xfs_agnumber_t          agno = XFS_INO_TO_AGNO(mp, startino);
        int                     error;

        ASSERT(agno < mp->m_sb.sb_agcount);
        ASSERT(!(flags & ~XFS_INOBT_WALK_FLAGS_ALL));

        error = xfs_iwalk_alloc(&iwag);
        if (error)
                return error;

        for_each_perag_from(mp, agno, pag) {
                iwag.pag = pag;
                error = xfs_iwalk_ag(&iwag);
                if (error)
                        break;
                iwag.startino = XFS_AGINO_TO_INO(mp, pag->pag_agno + 1, 0);
                if (flags & XFS_INOBT_WALK_SAME_AG)
                        break;
                iwag.pag = NULL;
        }

        if (iwag.pag)
                xfs_perag_put(pag);
        xfs_iwalk_free(&iwag);
        return error;
}