/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2011 STRATO. All rights reserved.
 */

#ifndef BTRFS_BACKREF_H
#define BTRFS_BACKREF_H

#include <linux/btrfs.h>
#include "ulist.h"
#include "extent_io.h"

struct inode_fs_paths {
	struct btrfs_path		*btrfs_path;
	struct btrfs_root		*fs_root;
	struct btrfs_data_container	*fspath;
};

typedef int (iterate_extent_inodes_t)(u64 inum, u64 offset, u64 root,
		void *ctx);
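
/*
 * Illustrative sketch (not part of the original header): a minimal
 * iterate_extent_inodes_t callback.  The name count_extent_refs and the
 * context struct are hypothetical.  The callback is invoked once per
 * (inode, file offset, root) triple that references the extent; returning 0
 * continues the iteration, while a non-zero return is expected to stop it
 * and be propagated back to the caller.
 *
 *	struct ref_count_ctx {
 *		u64 refs;
 *	};
 *
 *	static int count_extent_refs(u64 inum, u64 offset, u64 root, void *ctx)
 *	{
 *		struct ref_count_ctx *rc = ctx;
 *
 *		rc->refs++;
 *		pr_debug("ref: ino %llu root %llu offset %llu\n",
 *			 inum, root, offset);
 *		return 0;
 *	}
 */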

int extent_from_logical(struct btrfs_fs_info *fs_info, u64 logical,
			struct btrfs_path *path, struct btrfs_key *found_key,
			u64 *flags);

int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb,
			    struct btrfs_key *key, struct btrfs_extent_item *ei,
			    u32 item_size, u64 *out_root, u8 *out_level);

int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
			  u64 extent_item_objectid,
			  u64 extent_offset, int search_commit_root,
			  iterate_extent_inodes_t *iterate, void *ctx,
			  bool ignore_offset);

int iterate_inodes_from_logical(u64 logical, struct btrfs_fs_info *fs_info,
				struct btrfs_path *path,
				iterate_extent_inodes_t *iterate, void *ctx,
				bool ignore_offset);

int paths_from_inode(u64 inum, struct inode_fs_paths *ipath);

int btrfs_find_all_leafs(struct btrfs_trans_handle *trans,
			 struct btrfs_fs_info *fs_info, u64 bytenr,
			 u64 time_seq, struct ulist **leafs,
			 const u64 *extent_item_pos, bool ignore_offset);
int btrfs_find_all_roots(struct btrfs_trans_handle *trans,
			 struct btrfs_fs_info *fs_info, u64 bytenr,
			 u64 time_seq, struct ulist **roots, bool ignore_offset);
char *btrfs_ref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
			u32 name_len, unsigned long name_off,
			struct extent_buffer *eb_in, u64 parent,
			char *dest, u32 size);

struct btrfs_data_container *init_data_container(u32 total_bytes);
struct inode_fs_paths *init_ipath(s32 total_bytes, struct btrfs_root *fs_root,
				  struct btrfs_path *path);
void free_ipath(struct inode_fs_paths *ipath);

int btrfs_find_one_extref(struct btrfs_root *root, u64 inode_objectid,
			  u64 start_off, struct btrfs_path *path,
			  struct btrfs_inode_extref **ret_extref,
			  u64 *found_off);
/* Returns 0 if the extent is not shared, 1 if it is shared, < 0 on error. */
int btrfs_check_shared(struct btrfs_root *root, u64 inum, u64 bytenr,
		       struct ulist *roots, struct ulist *tmp_ulist);

int __init btrfs_prelim_ref_init(void);
void __cold btrfs_prelim_ref_exit(void);

struct prelim_ref {
	struct rb_node rbnode;
	u64 root_id;
	struct btrfs_key key_for_search;
	int level;
	int count;
	struct extent_inode_elem *inode_list;
	u64 parent;
	u64 wanted_disk_byte;
};

/*
 * Iterate backrefs of one extent.
 *
 * Currently it only supports iterating the backrefs of a tree block in the
 * commit root.
 */
struct btrfs_backref_iter {
	/* Logical bytenr of the extent whose backrefs are being iterated */
	u64 bytenr;
	struct btrfs_path *path;
	struct btrfs_fs_info *fs_info;
	/* Key of the item the iterator currently points at */
	struct btrfs_key cur_key;
	/* Offsets inside the leaf: start of the item, current ref, item end */
	u32 item_ptr;
	u32 cur_ptr;
	u32 end_ptr;
};

struct btrfs_backref_iter *btrfs_backref_iter_alloc(
		struct btrfs_fs_info *fs_info, gfp_t gfp_flag);

static inline void btrfs_backref_iter_free(struct btrfs_backref_iter *iter)
{
	if (!iter)
		return;
	btrfs_free_path(iter->path);
	kfree(iter);
}

static inline struct extent_buffer *btrfs_backref_get_eb(
		struct btrfs_backref_iter *iter)
{
	if (!iter)
		return NULL;
	return iter->path->nodes[0];
}

/*
 * For metadata with an EXTENT_ITEM key (i.e. non-skinny metadata), the first
 * inline data is a btrfs_tree_block_info, which is not preceded by a
 * btrfs_extent_inline_ref header.
 *
 * This helper determines whether that is the case.
 */
static inline bool btrfs_backref_has_tree_block_info(
		struct btrfs_backref_iter *iter)
{
	if (iter->cur_key.type == BTRFS_EXTENT_ITEM_KEY &&
	    iter->cur_ptr - iter->item_ptr == sizeof(struct btrfs_extent_item))
		return true;
	return false;
}

int btrfs_backref_iter_start(struct btrfs_backref_iter *iter, u64 bytenr);

int btrfs_backref_iter_next(struct btrfs_backref_iter *iter);

static inline bool btrfs_backref_iter_is_inline_ref(
		struct btrfs_backref_iter *iter)
{
	if (iter->cur_key.type == BTRFS_EXTENT_ITEM_KEY ||
	    iter->cur_key.type == BTRFS_METADATA_ITEM_KEY)
		return true;
	return false;
}

static inline void btrfs_backref_iter_release(struct btrfs_backref_iter *iter)
{
	iter->bytenr = 0;
	iter->item_ptr = 0;
	iter->cur_ptr = 0;
	iter->end_ptr = 0;
	btrfs_release_path(iter->path);
	memset(&iter->cur_key, 0, sizeof(iter->cur_key));
}
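
/*
 * Illustrative sketch (not part of the original header) of how the iterator
 * declared above is typically driven.  It assumes the convention that
 * btrfs_backref_iter_start() returns 0 on success or a negative errno, and
 * that btrfs_backref_iter_next() returns 0 while there is another backref,
 * >0 once iteration is finished, and <0 on error.
 *
 *	struct btrfs_backref_iter *iter;
 *	int ret;
 *
 *	iter = btrfs_backref_iter_alloc(fs_info, GFP_NOFS);
 *	if (!iter)
 *		return -ENOMEM;
 *
 *	ret = btrfs_backref_iter_start(iter, bytenr);
 *	if (ret < 0)
 *		goto out;
 *	while (ret == 0) {
 *		(examine the backref described by iter->cur_key and
 *		 iter->cur_ptr, e.g. via btrfs_backref_get_eb(iter))
 *		ret = btrfs_backref_iter_next(iter);
 *	}
 *	if (ret > 0)
 *		ret = 0;
 * out:
 *	btrfs_backref_iter_release(iter);
 *	btrfs_backref_iter_free(iter);
 */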

/*
 * Backref cache related structures
 *
 * The whole objective of backref_cache is to build a bi-directional map
 * of tree blocks (represented by backref_node) and all their parents.
 */

/*
 * Represent a tree block in the backref cache
 */
struct btrfs_backref_node {
	struct {
		struct rb_node rb_node;
		u64 bytenr;
	}; /* Use rb_simple_node for search/insert */

	u64 new_bytenr;
	/* Objectid of the tree block owner, may not be up to date */
	u64 owner;
	/* Link to pending, changed or detached list */
	struct list_head list;

	/* List of upper level edges, which link this node to its parents */
	struct list_head upper;
	/* List of lower level edges, which link this node to its children */
	struct list_head lower;

	/* NULL if this node is not a tree root */
	struct btrfs_root *root;
	/* Extent buffer got by COWing the block */
	struct extent_buffer *eb;
	/* Level of the tree block */
	unsigned int level:8;
	/* Is the block in a non-reference-counted tree */
	unsigned int cowonly:1;
	/* 1 if no child node is in the cache */
	unsigned int lowest:1;
	/* Is the extent buffer locked */
	unsigned int locked:1;
	/* Has the block been processed */
	unsigned int processed:1;
	/* Have the backrefs of this block been checked */
	unsigned int checked:1;
	/*
	 * 1 if the corresponding block has been COWed but some upper level
	 * block pointers may not point to the new location yet
	 */
	unsigned int pending:1;
	/* 1 if the backref node isn't connected to any other backref node */
	unsigned int detached:1;

	/*
	 * For a generic purpose backref cache we only care whether the root is
	 * a reloc root, not which subvolume it originally came from.
	 */
	unsigned int is_reloc_root:1;
};

#define LOWER	0
#define UPPER	1

/*
 * Represent an edge connecting upper and lower backref nodes.
 */
struct btrfs_backref_edge {
	/*
	 * list[LOWER] is linked to btrfs_backref_node::upper of the lower
	 * level node, and list[UPPER] is linked to btrfs_backref_node::lower
	 * of the upper level node.
	 *
	 * Also, build_backref_tree() uses list[UPPER] for pending edges,
	 * before linking list[UPPER] to its upper level nodes.
	 */
	struct list_head list[2];

	/* Two related nodes */
	struct btrfs_backref_node *node[2];
};

struct btrfs_backref_cache {
	/* Red black tree of all backref nodes in the cache */
	struct rb_root rb_root;
	/* For passing backref nodes to btrfs_reloc_cow_block() */
	struct btrfs_backref_node *path[BTRFS_MAX_LEVEL];
	/*
	 * List of blocks that have been COWed but some block pointers in upper
	 * level blocks may not reflect the new location
	 */
	struct list_head pending[BTRFS_MAX_LEVEL];
	/* List of backref nodes with no child node */
	struct list_head leaves;
	/* List of blocks that have been COWed in the current transaction */
	struct list_head changed;
	/* List of detached backref nodes */
	struct list_head detached;

	u64 last_trans;

	int nr_nodes;
	int nr_edges;

	/* List of unchecked backref edges during the backref cache build */
	struct list_head pending_edge;

	/* List of useless backref nodes during the backref cache build */
	struct list_head useless_node;

	struct btrfs_fs_info *fs_info;

	/*
	 * Whether this cache is for relocation
	 *
	 * A relocation backref cache requires more info for reloc roots than a
	 * generic backref cache.
	 */
	unsigned int is_reloc;
};

void btrfs_backref_init_cache(struct btrfs_fs_info *fs_info,
			      struct btrfs_backref_cache *cache, int is_reloc);
struct btrfs_backref_node *btrfs_backref_alloc_node(
		struct btrfs_backref_cache *cache, u64 bytenr, int level);
struct btrfs_backref_edge *btrfs_backref_alloc_edge(
		struct btrfs_backref_cache *cache);

#define LINK_LOWER	(1 << 0)
#define LINK_UPPER	(1 << 1)
static inline void btrfs_backref_link_edge(struct btrfs_backref_edge *edge,
					   struct btrfs_backref_node *lower,
					   struct btrfs_backref_node *upper,
					   int link_which)
{
	ASSERT(upper && lower && upper->level == lower->level + 1);
	edge->node[LOWER] = lower;
	edge->node[UPPER] = upper;
	if (link_which & LINK_LOWER)
		list_add_tail(&edge->list[LOWER], &lower->upper);
	if (link_which & LINK_UPPER)
		list_add_tail(&edge->list[UPPER], &upper->lower);
}
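
/*
 * Illustrative sketch (not part of the original header): while building the
 * cache, a new edge is often linked only on the LOWER side first and queued
 * as a pending edge, because the upper node may not be fully set up yet;
 * LINK_UPPER is done later, as described by the comment in
 * struct btrfs_backref_edge.
 *
 *	edge = btrfs_backref_alloc_edge(cache);
 *	if (!edge)
 *		return -ENOMEM;
 *	btrfs_backref_link_edge(edge, lower, upper, LINK_LOWER);
 *	list_add_tail(&edge->list[UPPER], &cache->pending_edge);
 */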

#endif