David Sterba | 9888c34 | 2018-04-03 19:16:55 +0200 | [diff] [blame] | 1 | /* SPDX-License-Identifier: GPL-2.0 */ |
Jan Schmidt | a542ad1 | 2011-06-13 19:52:59 +0200 | [diff] [blame] | 2 | /* |
| 3 | * Copyright (C) 2011 STRATO. All rights reserved. |
Jan Schmidt | a542ad1 | 2011-06-13 19:52:59 +0200 | [diff] [blame] | 4 | */ |
| 5 | |
David Sterba | 9888c34 | 2018-04-03 19:16:55 +0200 | [diff] [blame] | 6 | #ifndef BTRFS_BACKREF_H |
| 7 | #define BTRFS_BACKREF_H |
Jan Schmidt | a542ad1 | 2011-06-13 19:52:59 +0200 | [diff] [blame] | 8 | |
Filipe Brandenburger | 55e301f | 2013-01-29 06:04:50 +0000 | [diff] [blame] | 9 | #include <linux/btrfs.h> |
Jan Schmidt | 8da6d58 | 2011-11-23 18:55:04 +0100 | [diff] [blame] | 10 | #include "ulist.h" |
Qu Wenruo | 741188d | 2020-03-03 13:26:12 +0800 | [diff] [blame] | 11 | #include "disk-io.h" |
Alexander Block | 91cb916 | 2012-06-03 14:23:23 +0200 | [diff] [blame] | 12 | #include "extent_io.h" |
Jan Schmidt | a542ad1 | 2011-06-13 19:52:59 +0200 | [diff] [blame] | 13 | |
/*
 * Context used when resolving all filesystem paths that lead to one inode
 * (see paths_from_inode()).
 */
struct inode_fs_paths {
	/* Caller-provided scratch path used during the search */
	struct btrfs_path *btrfs_path;
	/* Root of the subvolume the inode lives in */
	struct btrfs_root *fs_root;
	/* Output container receiving the resolved path strings */
	struct btrfs_data_container *fspath;
};

/*
 * Callback invoked for each (inode number, file offset, root id) triple
 * found while iterating the inodes that reference an extent; @ctx is the
 * caller-private pointer passed to the iterator.
 * NOTE(review): the meaning of a non-zero return (stop iteration?) is
 * defined by the implementation — confirm in the .c file.
 */
typedef int (iterate_extent_inodes_t)(u64 inum, u64 offset, u64 root,
		void *ctx);
Jan Schmidt | a542ad1 | 2011-06-13 19:52:59 +0200 | [diff] [blame] | 22 | |
/*
 * Find the extent item covering @logical; its key is returned in
 * @found_key and its flags in @flags.  @path is caller-provided scratch.
 */
int extent_from_logical(struct btrfs_fs_info *fs_info, u64 logical,
			struct btrfs_path *path, struct btrfs_key *found_key,
			u64 *flags);

/*
 * Decode one tree-block backref from the inline refs of @ei, returning the
 * owning root and level via @out_root/@out_level.
 * NOTE(review): the exact cursor semantics of @ptr are defined by the
 * implementation — confirm there before relying on them.
 */
int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb,
			    struct btrfs_key *key, struct btrfs_extent_item *ei,
			    u32 item_size, u64 *out_root, u8 *out_level);

/*
 * Invoke @iterate for every inode referencing the given extent item.
 * @search_commit_root selects searching the commit root (no transaction).
 */
int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
			  u64 extent_item_objectid,
			  u64 extent_offset, int search_commit_root,
			  iterate_extent_inodes_t *iterate, void *ctx,
			  bool ignore_offset);

/* Like iterate_extent_inodes(), but starts from a logical byte address. */
int iterate_inodes_from_logical(u64 logical, struct btrfs_fs_info *fs_info,
				struct btrfs_path *path,
				iterate_extent_inodes_t *iterate, void *ctx,
				bool ignore_offset);

/* Fill @ipath with all filesystem paths leading to inode @inum. */
int paths_from_inode(u64 inum, struct inode_fs_paths *ipath);

/* Collect the leaves referencing extent @bytenr into the ulist @leafs. */
int btrfs_find_all_leafs(struct btrfs_trans_handle *trans,
			 struct btrfs_fs_info *fs_info, u64 bytenr,
			 u64 time_seq, struct ulist **leafs,
			 const u64 *extent_item_pos, bool ignore_offset);
/* Collect all roots referencing extent @bytenr into the ulist @roots. */
int btrfs_find_all_roots(struct btrfs_trans_handle *trans,
			 struct btrfs_fs_info *fs_info, u64 bytenr,
			 u64 time_seq, struct ulist **roots, bool ignore_offset);
/*
 * Build the file path for the name at @name_off/@name_len inside @eb_in
 * into the buffer @dest of @size bytes.
 * NOTE(review): the return convention (pointer into @dest vs. error
 * pointer) is defined at the definition — confirm before use.
 */
char *btrfs_ref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
			u32 name_len, unsigned long name_off,
			struct extent_buffer *eb_in, u64 parent,
			char *dest, u32 size);

/* Allocate a btrfs_data_container with room for @total_bytes of payload. */
struct btrfs_data_container *init_data_container(u32 total_bytes);
/* Allocate and set up an inode_fs_paths context; release with free_ipath(). */
struct inode_fs_paths *init_ipath(s32 total_bytes, struct btrfs_root *fs_root,
				  struct btrfs_path *path);
/* Free a context allocated by init_ipath(), including its fspath buffer. */
void free_ipath(struct inode_fs_paths *ipath);

/* Find the next inode extref at or after @start_off for @inode_objectid. */
int btrfs_find_one_extref(struct btrfs_root *root, u64 inode_objectid,
			  u64 start_off, struct btrfs_path *path,
			  struct btrfs_inode_extref **ret_extref,
			  u64 *found_off);
/* Determine whether the extent at @bytenr is shared by multiple roots. */
int btrfs_check_shared(struct btrfs_root *root, u64 inum, u64 bytenr,
		       struct ulist *roots, struct ulist *tmp_ulist);

/* Module-lifetime init/teardown for the prelim_ref infrastructure. */
int __init btrfs_prelim_ref_init(void);
void __cold btrfs_prelim_ref_exit(void);
/*
 * A preliminary reference collected while walking an extent's backrefs,
 * before it is fully resolved.
 * NOTE(review): precise per-field semantics are defined by the
 * implementation in the corresponding .c file; comments below are
 * best-effort from this header alone.
 */
struct prelim_ref {
	/* Linkage into the rbtree of collected refs */
	struct rb_node rbnode;
	/* Id of the root this ref belongs to */
	u64 root_id;
	/* Key used when resolving this ref by searching a root */
	struct btrfs_key key_for_search;
	/* Tree level of the referencing block */
	int level;
	/* Reference count contributed by this ref */
	int count;
	/* Inode/offset pairs found to reference the extent */
	struct extent_inode_elem *inode_list;
	/* Bytenr of the parent tree block, when known directly */
	u64 parent;
	/* Disk bytenr the resolved ref is expected to point at */
	u64 wanted_disk_byte;
};
| 81 | |
/*
 * Iterate backrefs of one extent.
 *
 * Now it only supports iteration of tree block in commit root.
 */
struct btrfs_backref_iter {
	/* Logical bytenr of the extent whose backrefs are being iterated */
	u64 bytenr;
	/* Private path kept positioned at the current backref item */
	struct btrfs_path *path;
	struct btrfs_fs_info *fs_info;
	/* Key of the item the iterator currently points at */
	struct btrfs_key cur_key;
	/*
	 * Cursor state.  NOTE(review): these look like byte offsets within
	 * the leaf (cur_ptr - item_ptr is compared against
	 * sizeof(struct btrfs_extent_item) in
	 * btrfs_backref_has_tree_block_info()) — confirm in the .c file.
	 */
	u32 item_ptr;
	u32 cur_ptr;
	u32 end_ptr;
};

/* Allocate an iterator; release it with btrfs_backref_iter_free(). */
struct btrfs_backref_iter *btrfs_backref_iter_alloc(
		struct btrfs_fs_info *fs_info, gfp_t gfp_flag);
| 99 | |
| 100 | static inline void btrfs_backref_iter_free(struct btrfs_backref_iter *iter) |
| 101 | { |
| 102 | if (!iter) |
| 103 | return; |
| 104 | btrfs_free_path(iter->path); |
| 105 | kfree(iter); |
| 106 | } |
| 107 | |
Qu Wenruo | c39c2dd | 2020-02-13 15:04:04 +0800 | [diff] [blame] | 108 | static inline struct extent_buffer *btrfs_backref_get_eb( |
| 109 | struct btrfs_backref_iter *iter) |
| 110 | { |
| 111 | if (!iter) |
| 112 | return NULL; |
| 113 | return iter->path->nodes[0]; |
| 114 | } |
| 115 | |
| 116 | /* |
| 117 | * For metadata with EXTENT_ITEM key (non-skinny) case, the first inline data |
| 118 | * is btrfs_tree_block_info, without a btrfs_extent_inline_ref header. |
| 119 | * |
| 120 | * This helper determines if that's the case. |
| 121 | */ |
| 122 | static inline bool btrfs_backref_has_tree_block_info( |
| 123 | struct btrfs_backref_iter *iter) |
| 124 | { |
| 125 | if (iter->cur_key.type == BTRFS_EXTENT_ITEM_KEY && |
| 126 | iter->cur_ptr - iter->item_ptr == sizeof(struct btrfs_extent_item)) |
| 127 | return true; |
| 128 | return false; |
| 129 | } |
| 130 | |
/* Position @iter at the first backref of the extent at @bytenr. */
int btrfs_backref_iter_start(struct btrfs_backref_iter *iter, u64 bytenr);

/*
 * Advance @iter to the next backref item.
 * NOTE(review): the return convention (end-of-iteration vs. error) is
 * defined at the definition — confirm there.
 */
int btrfs_backref_iter_next(struct btrfs_backref_iter *iter);
| 134 | |
| 135 | static inline bool btrfs_backref_iter_is_inline_ref( |
| 136 | struct btrfs_backref_iter *iter) |
| 137 | { |
| 138 | if (iter->cur_key.type == BTRFS_EXTENT_ITEM_KEY || |
| 139 | iter->cur_key.type == BTRFS_METADATA_ITEM_KEY) |
| 140 | return true; |
| 141 | return false; |
| 142 | } |
| 143 | |
Qu Wenruo | a37f232 | 2020-02-13 14:11:04 +0800 | [diff] [blame] | 144 | static inline void btrfs_backref_iter_release(struct btrfs_backref_iter *iter) |
| 145 | { |
| 146 | iter->bytenr = 0; |
| 147 | iter->item_ptr = 0; |
| 148 | iter->cur_ptr = 0; |
| 149 | iter->end_ptr = 0; |
| 150 | btrfs_release_path(iter->path); |
| 151 | memset(&iter->cur_key, 0, sizeof(iter->cur_key)); |
| 152 | } |
| 153 | |
/*
 * Backref cache related structures
 *
 * The whole objective of backref_cache is to build a bi-directional map
 * of tree blocks (represented by backref_node) and all their parents.
 */

/*
 * Represent a tree block in the backref cache
 */
struct btrfs_backref_node {
	struct {
		struct rb_node rb_node;
		u64 bytenr;
	}; /* Use rb_simple_node for search/insert */

	/*
	 * New bytenr of the block.  NOTE(review): presumably the location
	 * after relocation COW — confirm against the relocation code.
	 */
	u64 new_bytenr;
	/* Objectid of tree block owner, can be not uptodate */
	u64 owner;
	/* Link to pending, changed or detached list */
	struct list_head list;

	/* List of upper level edges, which link this node to its parents */
	struct list_head upper;
	/* List of lower level edges, which link this node to its children */
	struct list_head lower;

	/* NULL if this node is not tree root */
	struct btrfs_root *root;
	/* Extent buffer got by COWing the block */
	struct extent_buffer *eb;
	/* Level of the tree block */
	unsigned int level:8;
	/* Is the block in non-reference counted tree */
	unsigned int cowonly:1;
	/* 1 if no child node is in the cache */
	unsigned int lowest:1;
	/* Is the extent buffer locked */
	unsigned int locked:1;
	/* Has the block been processed */
	unsigned int processed:1;
	/* Have backrefs of this block been checked */
	unsigned int checked:1;
	/*
	 * 1 if corresponding block has been COWed but some upper level block
	 * pointers may not point to the new location
	 */
	unsigned int pending:1;
	/* 1 if the backref node isn't connected to any other backref node */
	unsigned int detached:1;

	/*
	 * For generic purpose backref cache, where we only care if it's a reloc
	 * root, doesn't care the source subvolid.
	 */
	unsigned int is_reloc_root:1;
};

/* Indices into btrfs_backref_edge::list[] and ::node[] */
#define LOWER 0
#define UPPER 1
| 214 | |
/*
 * Represent an edge connecting upper and lower backref nodes.
 */
struct btrfs_backref_edge {
	/*
	 * list[LOWER] is linked to btrfs_backref_node::upper of lower level
	 * node, and list[UPPER] is linked to btrfs_backref_node::lower of
	 * upper level node.
	 *
	 * Also, build_backref_tree() uses list[UPPER] for pending edges, before
	 * linking list[UPPER] to its upper level nodes.
	 */
	struct list_head list[2];

	/* Two related nodes: node[LOWER] is the child, node[UPPER] the parent */
	struct btrfs_backref_node *node[2];
};
| 232 | |
struct btrfs_backref_cache {
	/* Red black tree of all backref nodes in the cache */
	struct rb_root rb_root;
	/* For passing backref nodes to btrfs_reloc_cow_block */
	struct btrfs_backref_node *path[BTRFS_MAX_LEVEL];
	/*
	 * List of blocks that have been COWed but some block pointers in upper
	 * level blocks may not reflect the new location
	 */
	struct list_head pending[BTRFS_MAX_LEVEL];
	/* List of backref nodes with no child node */
	struct list_head leaves;
	/* List of blocks that have been COWed in current transaction */
	struct list_head changed;
	/* List of detached backref node. */
	struct list_head detached;

	/* NOTE(review): looks like the transid the cache was last valid for
	 * — confirm at the sites that set it. */
	u64 last_trans;

	/* Live counts, decremented by btrfs_backref_free_node()/_edge() */
	int nr_nodes;
	int nr_edges;

	/* List of unchecked backref edges during backref cache build */
	struct list_head pending_edge;

	/* List of useless backref nodes during backref cache build */
	struct list_head useless_node;

	struct btrfs_fs_info *fs_info;

	/*
	 * Whether this cache is for relocation
	 *
	 * Relocation backref cache requires more info for reloc root compared
	 * to generic backref cache.
	 */
	unsigned int is_reloc;
};
| 271 | |
/* Initialize an empty backref cache; @is_reloc selects relocation mode. */
void btrfs_backref_init_cache(struct btrfs_fs_info *fs_info,
			      struct btrfs_backref_cache *cache, int is_reloc);
/*
 * Allocate a backref node accounted against @cache (counterpart of
 * btrfs_backref_free_node(), which drops nr_nodes).
 */
struct btrfs_backref_node *btrfs_backref_alloc_node(
		struct btrfs_backref_cache *cache, u64 bytenr, int level);
/*
 * Allocate a backref edge accounted against @cache (counterpart of
 * btrfs_backref_free_edge(), which drops nr_edges).
 */
struct btrfs_backref_edge *btrfs_backref_alloc_edge(
		struct btrfs_backref_cache *cache);
Qu Wenruo | 584fb12 | 2020-03-03 13:14:41 +0800 | [diff] [blame] | 278 | |
Qu Wenruo | f39911e5 | 2020-03-03 13:24:06 +0800 | [diff] [blame] | 279 | #define LINK_LOWER (1 << 0) |
| 280 | #define LINK_UPPER (1 << 1) |
| 281 | static inline void btrfs_backref_link_edge(struct btrfs_backref_edge *edge, |
| 282 | struct btrfs_backref_node *lower, |
| 283 | struct btrfs_backref_node *upper, |
| 284 | int link_which) |
| 285 | { |
| 286 | ASSERT(upper && lower && upper->level == lower->level + 1); |
| 287 | edge->node[LOWER] = lower; |
| 288 | edge->node[UPPER] = upper; |
| 289 | if (link_which & LINK_LOWER) |
| 290 | list_add_tail(&edge->list[LOWER], &lower->upper); |
| 291 | if (link_which & LINK_UPPER) |
| 292 | list_add_tail(&edge->list[UPPER], &upper->lower); |
| 293 | } |
| 294 | |
Qu Wenruo | 741188d | 2020-03-03 13:26:12 +0800 | [diff] [blame] | 295 | static inline void btrfs_backref_free_node(struct btrfs_backref_cache *cache, |
| 296 | struct btrfs_backref_node *node) |
| 297 | { |
| 298 | if (node) { |
| 299 | cache->nr_nodes--; |
| 300 | btrfs_put_root(node->root); |
| 301 | kfree(node); |
| 302 | } |
| 303 | } |
| 304 | |
| 305 | static inline void btrfs_backref_free_edge(struct btrfs_backref_cache *cache, |
| 306 | struct btrfs_backref_edge *edge) |
| 307 | { |
| 308 | if (edge) { |
| 309 | cache->nr_edges--; |
| 310 | kfree(edge); |
| 311 | } |
| 312 | } |
| 313 | |
Qu Wenruo | b0fe707 | 2020-03-03 13:35:27 +0800 | [diff] [blame] | 314 | static inline void btrfs_backref_unlock_node_buffer( |
| 315 | struct btrfs_backref_node *node) |
| 316 | { |
| 317 | if (node->locked) { |
| 318 | btrfs_tree_unlock(node->eb); |
| 319 | node->locked = 0; |
| 320 | } |
| 321 | } |
| 322 | |
| 323 | static inline void btrfs_backref_drop_node_buffer( |
| 324 | struct btrfs_backref_node *node) |
| 325 | { |
| 326 | if (node->eb) { |
| 327 | btrfs_backref_unlock_node_buffer(node); |
| 328 | free_extent_buffer(node->eb); |
| 329 | node->eb = NULL; |
| 330 | } |
| 331 | } |
| 332 | |
| 333 | /* |
| 334 | * Drop the backref node from cache without cleaning up its children |
| 335 | * edges. |
| 336 | * |
| 337 | * This can only be called on node without parent edges. |
| 338 | * The children edges are still kept as is. |
| 339 | */ |
| 340 | static inline void btrfs_backref_drop_node(struct btrfs_backref_cache *tree, |
| 341 | struct btrfs_backref_node *node) |
| 342 | { |
| 343 | BUG_ON(!list_empty(&node->upper)); |
| 344 | |
| 345 | btrfs_backref_drop_node_buffer(node); |
| 346 | list_del(&node->list); |
| 347 | list_del(&node->lower); |
| 348 | if (!RB_EMPTY_NODE(&node->rb_node)) |
| 349 | rb_erase(&node->rb_node, &tree->rb_root); |
| 350 | btrfs_backref_free_node(tree, node); |
| 351 | } |
| 352 | |
/*
 * Remove @node from @cache.  NOTE(review): unlike
 * btrfs_backref_drop_node(), this appears to also handle the node's
 * edges — confirm the exact contract at the definition.
 */
void btrfs_backref_cleanup_node(struct btrfs_backref_cache *cache,
				struct btrfs_backref_node *node);

/* Tear down the whole cache, releasing all remaining nodes and edges. */
void btrfs_backref_release_cache(struct btrfs_backref_cache *cache);
| 357 | |
/*
 * Report a fatal backref cache inconsistency at @bytenr through
 * btrfs_panic(), tagging it with @errno.
 */
static inline void btrfs_backref_panic(struct btrfs_fs_info *fs_info,
				       u64 bytenr, int errno)
{
	btrfs_panic(fs_info, errno,
		    "Inconsistency in backref cache found at offset %llu",
		    bytenr);
}
| 365 | |
/*
 * Add the backrefs of @cur's tree block into @cache using @iter.
 * NOTE(review): the interplay with the cache's pending_edge /
 * useless_node build lists is defined at the definition — confirm there.
 */
int btrfs_backref_add_tree_node(struct btrfs_backref_cache *cache,
				struct btrfs_path *path,
				struct btrfs_backref_iter *iter,
				struct btrfs_key *node_key,
				struct btrfs_backref_node *cur);

/* Finish linking upper-level nodes of the cache, starting from @start. */
int btrfs_backref_finish_upper_links(struct btrfs_backref_cache *cache,
				     struct btrfs_backref_node *start);
| 374 | |
Jan Schmidt | a542ad1 | 2011-06-13 19:52:59 +0200 | [diff] [blame] | 375 | #endif |