// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) Qu Wenruo 2017. All rights reserved.
 */

/*
 * The module is used to catch unexpected/corrupted tree block data.
 * Such behavior can be caused either by a fuzzed image or bugs.
 *
 * The objective is to do leaf/node validation checks when a tree block is
 * read from disk, and to check *every* possible member, so other code won't
 * need to check them again.
 *
 * Due to the potential and unwanted damage, every checker needs to be
 * carefully reviewed so that it does not prevent the mount of valid images.
 */

#include <linux/types.h>
#include <linux/stddef.h>
#include <linux/error-injection.h>
#include "ctree.h"
#include "tree-checker.h"
#include "disk-io.h"
#include "compression.h"
#include "volumes.h"
#include "misc.h"
#include "btrfs_inode.h"

/*
 * Error messages should follow the following format:
 * corrupt <type>: <identifier>, <reason>[, <bad_value>]
 *
 * @type:	leaf or node
 * @identifier:	the necessary info to locate the leaf/node.
 *		It's recommended to decode key.objectid/offset if it's
 *		meaningful.
 * @reason:	describe the error
 * @bad_value:	optional, it's recommended to output the bad value and its
 *		expected value (range).
 *
 * Since comma is used to separate the components, only space is allowed
 * inside each component.
 */

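/*
 * A sketch of the resulting output, with hypothetical values, for a leaf
 * carrying a bad key objectid:
 *
 *   corrupt leaf: root=5 block=29360128 slot=1, invalid key objectid, have 0 expect 256
 *
 * "root" is the tree that owns the block and "block" is its logical bytenr.
 */
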
/*
 * Append generic "corrupt leaf/node root=%llu block=%llu slot=%d: " to @fmt.
 * Allows callers to customize the output.
 */
__printf(3, 4)
__cold
static void generic_err(const struct extent_buffer *eb, int slot,
                        const char *fmt, ...)
{
        const struct btrfs_fs_info *fs_info = eb->fs_info;
        struct va_format vaf;
        va_list args;

        va_start(args, fmt);

        vaf.fmt = fmt;
        vaf.va = &args;

        btrfs_crit(fs_info,
                   "corrupt %s: root=%llu block=%llu slot=%d, %pV",
                   btrfs_header_level(eb) == 0 ? "leaf" : "node",
                   btrfs_header_owner(eb), btrfs_header_bytenr(eb), slot, &vaf);
        va_end(args);
}

/*
 * Customized reporter for extent data items, since the key objectid and
 * offset have their own meaning.
 */
__printf(3, 4)
__cold
static void file_extent_err(const struct extent_buffer *eb, int slot,
                            const char *fmt, ...)
{
        const struct btrfs_fs_info *fs_info = eb->fs_info;
        struct btrfs_key key;
        struct va_format vaf;
        va_list args;

        btrfs_item_key_to_cpu(eb, &key, slot);
        va_start(args, fmt);

        vaf.fmt = fmt;
        vaf.va = &args;

        btrfs_crit(fs_info,
        "corrupt %s: root=%llu block=%llu slot=%d ino=%llu file_offset=%llu, %pV",
                   btrfs_header_level(eb) == 0 ? "leaf" : "node",
                   btrfs_header_owner(eb), btrfs_header_bytenr(eb), slot,
                   key.objectid, key.offset, &vaf);
        va_end(args);
}

/*
 * Return 0 if the btrfs_file_extent_##name is aligned to @alignment,
 * else return 1.
 */
#define CHECK_FE_ALIGNED(leaf, slot, fi, name, alignment)                     \
({                                                                            \
        if (unlikely(!IS_ALIGNED(btrfs_file_extent_##name((leaf), (fi)),      \
                                 (alignment))))                               \
                file_extent_err((leaf), (slot),                               \
        "invalid %s for file extent, have %llu, should be aligned to %u",     \
                        (#name), btrfs_file_extent_##name((leaf), (fi)),      \
                        (alignment));                                         \
        (!IS_ALIGNED(btrfs_file_extent_##name((leaf), (fi)), (alignment)));   \
})

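/*
 * A usage sketch: the macro both reports the error and evaluates to the
 * result of the alignment test, so several checks can be chained with ||,
 * as done in check_extent_data_item() below:
 *
 *	if (CHECK_FE_ALIGNED(leaf, slot, fi, ram_bytes, sectorsize) ||
 *	    CHECK_FE_ALIGNED(leaf, slot, fi, num_bytes, sectorsize))
 *		return -EUCLEAN;
 */
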
static u64 file_extent_end(struct extent_buffer *leaf,
                           struct btrfs_key *key,
                           struct btrfs_file_extent_item *extent)
{
        u64 end;
        u64 len;

        if (btrfs_file_extent_type(leaf, extent) == BTRFS_FILE_EXTENT_INLINE) {
                len = btrfs_file_extent_ram_bytes(leaf, extent);
                end = ALIGN(key->offset + len, leaf->fs_info->sectorsize);
        } else {
                len = btrfs_file_extent_num_bytes(leaf, extent);
                end = key->offset + len;
        }
        return end;
}

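/*
 * A worked example with hypothetical values: an inline extent at
 * key->offset 0 with ram_bytes 100 and sectorsize 4096 ends at
 * ALIGN(0 + 100, 4096) = 4096, while a regular extent at key->offset 4096
 * with num_bytes 8192 ends at 4096 + 8192 = 12288.
 */
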
/*
 * Customized report for dir_item, the only new important information is
 * key->objectid, which represents the inode number.
 */
__printf(3, 4)
__cold
static void dir_item_err(const struct extent_buffer *eb, int slot,
                         const char *fmt, ...)
{
        const struct btrfs_fs_info *fs_info = eb->fs_info;
        struct btrfs_key key;
        struct va_format vaf;
        va_list args;

        btrfs_item_key_to_cpu(eb, &key, slot);
        va_start(args, fmt);

        vaf.fmt = fmt;
        vaf.va = &args;

        btrfs_crit(fs_info,
                   "corrupt %s: root=%llu block=%llu slot=%d ino=%llu, %pV",
                   btrfs_header_level(eb) == 0 ? "leaf" : "node",
                   btrfs_header_owner(eb), btrfs_header_bytenr(eb), slot,
                   key.objectid, &vaf);
        va_end(args);
}

/*
 * This function checks prev_key->objectid, to ensure current key and prev_key
 * share the same objectid as inode number.
 *
 * This is to detect missing INODE_ITEM in subvolume trees.
 *
 * Return true if everything is OK or we don't need to check.
 * Return false if anything is wrong.
 */
static bool check_prev_ino(struct extent_buffer *leaf,
                           struct btrfs_key *key, int slot,
                           struct btrfs_key *prev_key)
{
        /* No prev key, skip check */
        if (slot == 0)
                return true;

        /* Only these key->types need to be checked */
        ASSERT(key->type == BTRFS_XATTR_ITEM_KEY ||
               key->type == BTRFS_INODE_REF_KEY ||
               key->type == BTRFS_DIR_INDEX_KEY ||
               key->type == BTRFS_DIR_ITEM_KEY ||
               key->type == BTRFS_EXTENT_DATA_KEY);

        /*
         * Only subvolume trees along with their reloc trees need this check.
         * Trees like the log tree don't follow this ino requirement.
         */
        if (!is_fstree(btrfs_header_owner(leaf)))
                return true;

        if (key->objectid == prev_key->objectid)
                return true;

        /* Error found */
        dir_item_err(leaf, slot,
                     "invalid previous key objectid, have %llu expect %llu",
                     prev_key->objectid, key->objectid);
        return false;
}
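
/*
 * For example (hypothetical layout): if a subvolume leaf holds key
 * (257 EXTENT_DATA 0) right after items of inode 256, with no
 * (257 INODE_ITEM 0) in between, the INODE_ITEM is missing and the
 * objectid mismatch is reported by check_prev_ino().
 */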
static int check_extent_data_item(struct extent_buffer *leaf,
                                  struct btrfs_key *key, int slot,
                                  struct btrfs_key *prev_key)
{
        struct btrfs_fs_info *fs_info = leaf->fs_info;
        struct btrfs_file_extent_item *fi;
        u32 sectorsize = fs_info->sectorsize;
        u32 item_size = btrfs_item_size(leaf, slot);
        u64 extent_end;

        if (unlikely(!IS_ALIGNED(key->offset, sectorsize))) {
                file_extent_err(leaf, slot,
"unaligned file_offset for file extent, have %llu should be aligned to %u",
                        key->offset, sectorsize);
                return -EUCLEAN;
        }

        /*
         * Previous key must have the same key->objectid (ino).
         * It can be XATTR_ITEM, INODE_ITEM or just another EXTENT_DATA.
         * But if objectids mismatch, it means we have a missing
         * INODE_ITEM.
         */
        if (unlikely(!check_prev_ino(leaf, key, slot, prev_key)))
                return -EUCLEAN;

        fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);

        /*
         * Make sure the item contains at least the inline header, so the file
         * extent type is not some garbage.
         */
        if (unlikely(item_size < BTRFS_FILE_EXTENT_INLINE_DATA_START)) {
                file_extent_err(leaf, slot,
                                "invalid item size, have %u expect [%zu, %u)",
                                item_size, BTRFS_FILE_EXTENT_INLINE_DATA_START,
                                SZ_4K);
                return -EUCLEAN;
        }
        if (unlikely(btrfs_file_extent_type(leaf, fi) >=
                     BTRFS_NR_FILE_EXTENT_TYPES)) {
                file_extent_err(leaf, slot,
                "invalid type for file extent, have %u expect range [0, %u]",
                        btrfs_file_extent_type(leaf, fi),
                        BTRFS_NR_FILE_EXTENT_TYPES - 1);
                return -EUCLEAN;
        }

        /*
         * Support for new compression/encryption must introduce incompat flag,
         * and must be caught in open_ctree().
         */
        if (unlikely(btrfs_file_extent_compression(leaf, fi) >=
                     BTRFS_NR_COMPRESS_TYPES)) {
                file_extent_err(leaf, slot,
        "invalid compression for file extent, have %u expect range [0, %u]",
                        btrfs_file_extent_compression(leaf, fi),
                        BTRFS_NR_COMPRESS_TYPES - 1);
                return -EUCLEAN;
        }
        if (unlikely(btrfs_file_extent_encryption(leaf, fi))) {
                file_extent_err(leaf, slot,
                        "invalid encryption for file extent, have %u expect 0",
                        btrfs_file_extent_encryption(leaf, fi));
                return -EUCLEAN;
        }
        if (btrfs_file_extent_type(leaf, fi) == BTRFS_FILE_EXTENT_INLINE) {
                /* Inline extent must have 0 as key offset */
                if (unlikely(key->offset)) {
                        file_extent_err(leaf, slot,
                "invalid file_offset for inline file extent, have %llu expect 0",
                                key->offset);
                        return -EUCLEAN;
                }

                /* Compressed inline extent has no on-disk size, skip it */
                if (btrfs_file_extent_compression(leaf, fi) !=
                    BTRFS_COMPRESS_NONE)
                        return 0;

                /* Uncompressed inline extent size must match item size */
                if (unlikely(item_size != BTRFS_FILE_EXTENT_INLINE_DATA_START +
                             btrfs_file_extent_ram_bytes(leaf, fi))) {
                        file_extent_err(leaf, slot,
        "invalid ram_bytes for uncompressed inline extent, have %u expect %llu",
                                item_size, BTRFS_FILE_EXTENT_INLINE_DATA_START +
                                btrfs_file_extent_ram_bytes(leaf, fi));
                        return -EUCLEAN;
                }
                return 0;
        }

        /* Regular or preallocated extent has fixed item size */
        if (unlikely(item_size != sizeof(*fi))) {
                file_extent_err(leaf, slot,
        "invalid item size for reg/prealloc file extent, have %u expect %zu",
                        item_size, sizeof(*fi));
                return -EUCLEAN;
        }
        if (unlikely(CHECK_FE_ALIGNED(leaf, slot, fi, ram_bytes, sectorsize) ||
                     CHECK_FE_ALIGNED(leaf, slot, fi, disk_bytenr, sectorsize) ||
                     CHECK_FE_ALIGNED(leaf, slot, fi, disk_num_bytes, sectorsize) ||
                     CHECK_FE_ALIGNED(leaf, slot, fi, offset, sectorsize) ||
                     CHECK_FE_ALIGNED(leaf, slot, fi, num_bytes, sectorsize)))
                return -EUCLEAN;

        /* Catch extent end overflow */
        if (unlikely(check_add_overflow(btrfs_file_extent_num_bytes(leaf, fi),
                                        key->offset, &extent_end))) {
                file_extent_err(leaf, slot,
        "extent end overflow, have file offset %llu extent num bytes %llu",
                                key->offset,
                                btrfs_file_extent_num_bytes(leaf, fi));
                return -EUCLEAN;
        }

        /*
         * Check that no two consecutive file extent items, in the same leaf,
         * present ranges that overlap each other.
         */
        if (slot > 0 &&
            prev_key->objectid == key->objectid &&
            prev_key->type == BTRFS_EXTENT_DATA_KEY) {
                struct btrfs_file_extent_item *prev_fi;
                u64 prev_end;

                prev_fi = btrfs_item_ptr(leaf, slot - 1,
                                         struct btrfs_file_extent_item);
                prev_end = file_extent_end(leaf, prev_key, prev_fi);
                if (unlikely(prev_end > key->offset)) {
                        file_extent_err(leaf, slot - 1,
"file extent end range (%llu) goes beyond start offset (%llu) of the next file extent",
                                prev_end, key->offset);
                        return -EUCLEAN;
                }
        }

        return 0;
}

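/*
 * An overlap example with hypothetical values: if slot - 1 holds a regular
 * extent at file offset 0 with num_bytes 8192, its end is 8192; a following
 * EXTENT_DATA key at offset 4096 would then overlap it and is rejected.
 */
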
static int check_csum_item(struct extent_buffer *leaf, struct btrfs_key *key,
                           int slot, struct btrfs_key *prev_key)
{
        struct btrfs_fs_info *fs_info = leaf->fs_info;
        u32 sectorsize = fs_info->sectorsize;
        const u32 csumsize = fs_info->csum_size;

        if (unlikely(key->objectid != BTRFS_EXTENT_CSUM_OBJECTID)) {
                generic_err(leaf, slot,
                "invalid key objectid for csum item, have %llu expect %llu",
                        key->objectid, BTRFS_EXTENT_CSUM_OBJECTID);
                return -EUCLEAN;
        }
        if (unlikely(!IS_ALIGNED(key->offset, sectorsize))) {
                generic_err(leaf, slot,
        "unaligned key offset for csum item, have %llu should be aligned to %u",
                        key->offset, sectorsize);
                return -EUCLEAN;
        }
        if (unlikely(!IS_ALIGNED(btrfs_item_size(leaf, slot), csumsize))) {
                generic_err(leaf, slot,
        "unaligned item size for csum item, have %u should be aligned to %u",
                        btrfs_item_size(leaf, slot), csumsize);
                return -EUCLEAN;
        }
        if (slot > 0 && prev_key->type == BTRFS_EXTENT_CSUM_KEY) {
                u64 prev_csum_end;
                u32 prev_item_size;

                prev_item_size = btrfs_item_size(leaf, slot - 1);
                prev_csum_end = (prev_item_size / csumsize) * sectorsize;
                prev_csum_end += prev_key->offset;
                if (unlikely(prev_csum_end > key->offset)) {
                        generic_err(leaf, slot - 1,
"csum end range (%llu) goes beyond the start range (%llu) of the next csum item",
                                prev_csum_end, key->offset);
                        return -EUCLEAN;
                }
        }
        return 0;
}

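/*
 * A coverage example with hypothetical values: with crc32c checksums
 * (csum_size 4) and sectorsize 4096, a previous csum item of 32 bytes
 * covers (32 / 4) * 4096 = 32768 bytes of data, so the next csum item
 * must not start before prev_key->offset + 32768.
 */
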
/* Inode item error output has the same format as dir_item_err() */
#define inode_item_err(eb, slot, fmt, ...)                      \
        dir_item_err(eb, slot, fmt, __VA_ARGS__)

static int check_inode_key(struct extent_buffer *leaf, struct btrfs_key *key,
                           int slot)
{
        struct btrfs_key item_key;
        bool is_inode_item;

        btrfs_item_key_to_cpu(leaf, &item_key, slot);
        is_inode_item = (item_key.type == BTRFS_INODE_ITEM_KEY);

        /* For XATTR_ITEM, location key should be all 0 */
        if (item_key.type == BTRFS_XATTR_ITEM_KEY) {
                if (unlikely(key->objectid != 0 || key->type != 0 ||
                             key->offset != 0))
                        return -EUCLEAN;
                return 0;
        }

        if (unlikely((key->objectid < BTRFS_FIRST_FREE_OBJECTID ||
                      key->objectid > BTRFS_LAST_FREE_OBJECTID) &&
                     key->objectid != BTRFS_ROOT_TREE_DIR_OBJECTID &&
                     key->objectid != BTRFS_FREE_INO_OBJECTID)) {
                if (is_inode_item) {
                        generic_err(leaf, slot,
        "invalid key objectid: has %llu expect %llu or [%llu, %llu] or %llu",
                                key->objectid, BTRFS_ROOT_TREE_DIR_OBJECTID,
                                BTRFS_FIRST_FREE_OBJECTID,
                                BTRFS_LAST_FREE_OBJECTID,
                                BTRFS_FREE_INO_OBJECTID);
                } else {
                        dir_item_err(leaf, slot,
"invalid location key objectid: has %llu expect %llu or [%llu, %llu] or %llu",
                                key->objectid, BTRFS_ROOT_TREE_DIR_OBJECTID,
                                BTRFS_FIRST_FREE_OBJECTID,
                                BTRFS_LAST_FREE_OBJECTID,
                                BTRFS_FREE_INO_OBJECTID);
                }
                return -EUCLEAN;
        }
        if (unlikely(key->offset != 0)) {
                if (is_inode_item)
                        inode_item_err(leaf, slot,
                                       "invalid key offset: has %llu expect 0",
                                       key->offset);
                else
                        dir_item_err(leaf, slot,
                                "invalid location key offset: has %llu expect 0",
                                key->offset);
                return -EUCLEAN;
        }
        return 0;
}

static int check_root_key(struct extent_buffer *leaf, struct btrfs_key *key,
                          int slot)
{
        struct btrfs_key item_key;
        bool is_root_item;

        btrfs_item_key_to_cpu(leaf, &item_key, slot);
        is_root_item = (item_key.type == BTRFS_ROOT_ITEM_KEY);

        /* No such tree id */
        if (unlikely(key->objectid == 0)) {
                if (is_root_item)
                        generic_err(leaf, slot, "invalid root id 0");
                else
                        dir_item_err(leaf, slot,
                                     "invalid location key root id 0");
                return -EUCLEAN;
        }

        /* DIR_ITEM/INDEX/INODE_REF is not allowed to point to non-fs trees */
        if (unlikely(!is_fstree(key->objectid) && !is_root_item)) {
                dir_item_err(leaf, slot,
                "invalid location key objectid, have %llu expect [%llu, %llu]",
                        key->objectid, BTRFS_FIRST_FREE_OBJECTID,
                        BTRFS_LAST_FREE_OBJECTID);
                return -EUCLEAN;
        }

        /*
         * ROOT_ITEM with non-zero offset means this is a snapshot, created at
         * @offset transid.
         * Furthermore, for location key in DIR_ITEM, its offset is always -1.
         *
         * So here we only check offset for reloc tree whose key->offset must
         * be a valid tree.
         */
        if (unlikely(key->objectid == BTRFS_TREE_RELOC_OBJECTID &&
                     key->offset == 0)) {
                generic_err(leaf, slot, "invalid root id 0 for reloc tree");
                return -EUCLEAN;
        }
        return 0;
}

static int check_dir_item(struct extent_buffer *leaf,
                          struct btrfs_key *key, struct btrfs_key *prev_key,
                          int slot)
{
        struct btrfs_fs_info *fs_info = leaf->fs_info;
        struct btrfs_dir_item *di;
        u32 item_size = btrfs_item_size(leaf, slot);
        u32 cur = 0;

        if (unlikely(!check_prev_ino(leaf, key, slot, prev_key)))
                return -EUCLEAN;

        di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item);
        while (cur < item_size) {
                struct btrfs_key location_key;
                u32 name_len;
                u32 data_len;
                u32 max_name_len;
                u32 total_size;
                u32 name_hash;
                u8 dir_type;
                int ret;

                /* header itself should not cross item boundary */
                if (unlikely(cur + sizeof(*di) > item_size)) {
                        dir_item_err(leaf, slot,
                "dir item header crosses item boundary, have %zu boundary %u",
                                cur + sizeof(*di), item_size);
                        return -EUCLEAN;
                }

                /* Location key check */
                btrfs_dir_item_key_to_cpu(leaf, di, &location_key);
                if (location_key.type == BTRFS_ROOT_ITEM_KEY) {
                        ret = check_root_key(leaf, &location_key, slot);
                        if (unlikely(ret < 0))
                                return ret;
                } else if (location_key.type == BTRFS_INODE_ITEM_KEY ||
                           location_key.type == 0) {
                        ret = check_inode_key(leaf, &location_key, slot);
                        if (unlikely(ret < 0))
                                return ret;
                } else {
                        dir_item_err(leaf, slot,
                        "invalid location key type, have %u, expect %u or %u",
                                location_key.type, BTRFS_ROOT_ITEM_KEY,
                                BTRFS_INODE_ITEM_KEY);
                        return -EUCLEAN;
                }

                /* dir type check */
                dir_type = btrfs_dir_type(leaf, di);
                if (unlikely(dir_type >= BTRFS_FT_MAX)) {
                        dir_item_err(leaf, slot,
                                "invalid dir item type, have %u expect [0, %u)",
                                dir_type, BTRFS_FT_MAX);
                        return -EUCLEAN;
                }

                if (unlikely(key->type == BTRFS_XATTR_ITEM_KEY &&
                             dir_type != BTRFS_FT_XATTR)) {
                        dir_item_err(leaf, slot,
                "invalid dir item type for XATTR key, have %u expect %u",
                                dir_type, BTRFS_FT_XATTR);
                        return -EUCLEAN;
                }
                if (unlikely(dir_type == BTRFS_FT_XATTR &&
                             key->type != BTRFS_XATTR_ITEM_KEY)) {
                        dir_item_err(leaf, slot,
                                "xattr dir type found for non-XATTR key");
                        return -EUCLEAN;
                }
                if (dir_type == BTRFS_FT_XATTR)
                        max_name_len = XATTR_NAME_MAX;
                else
                        max_name_len = BTRFS_NAME_LEN;

                /* Name/data length check */
                name_len = btrfs_dir_name_len(leaf, di);
                data_len = btrfs_dir_data_len(leaf, di);
                if (unlikely(name_len > max_name_len)) {
                        dir_item_err(leaf, slot,
                                "dir item name len too long, have %u max %u",
                                name_len, max_name_len);
                        return -EUCLEAN;
                }
                if (unlikely(name_len + data_len > BTRFS_MAX_XATTR_SIZE(fs_info))) {
                        dir_item_err(leaf, slot,
                        "dir item name and data len too long, have %u max %u",
                                name_len + data_len,
                                BTRFS_MAX_XATTR_SIZE(fs_info));
                        return -EUCLEAN;
                }

                if (unlikely(data_len && dir_type != BTRFS_FT_XATTR)) {
                        dir_item_err(leaf, slot,
                        "dir item with invalid data len, have %u expect 0",
                                data_len);
                        return -EUCLEAN;
                }

                total_size = sizeof(*di) + name_len + data_len;

                /* header and name/data should not cross item boundary */
                if (unlikely(cur + total_size > item_size)) {
                        dir_item_err(leaf, slot,
                "dir item data crosses item boundary, have %u boundary %u",
                                cur + total_size, item_size);
                        return -EUCLEAN;
                }

                /*
                 * Special check for XATTR/DIR_ITEM, as key->offset is the
                 * name hash and should match the name
                 */
                if (key->type == BTRFS_DIR_ITEM_KEY ||
                    key->type == BTRFS_XATTR_ITEM_KEY) {
                        char namebuf[max(BTRFS_NAME_LEN, XATTR_NAME_MAX)];

                        read_extent_buffer(leaf, namebuf,
                                        (unsigned long)(di + 1), name_len);
                        name_hash = btrfs_name_hash(namebuf, name_len);
                        if (unlikely(key->offset != name_hash)) {
                                dir_item_err(leaf, slot,
                "name hash mismatch with key, have 0x%016x expect 0x%016llx",
                                        name_hash, key->offset);
                                return -EUCLEAN;
                        }
                }
                cur += total_size;
                di = (struct btrfs_dir_item *)((void *)di + total_size);
        }
        return 0;
}

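/*
 * Note on the name hash: btrfs_name_hash() is crc32c based, so the
 * DIR_ITEM/XATTR_ITEM key offset is fully determined by the name bytes;
 * e.g. an entry named "foo" must sit at
 * key->offset == btrfs_name_hash("foo", 3).
 */
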
__printf(3, 4)
__cold
static void block_group_err(const struct extent_buffer *eb, int slot,
                            const char *fmt, ...)
{
        const struct btrfs_fs_info *fs_info = eb->fs_info;
        struct btrfs_key key;
        struct va_format vaf;
        va_list args;

        btrfs_item_key_to_cpu(eb, &key, slot);
        va_start(args, fmt);

        vaf.fmt = fmt;
        vaf.va = &args;

        btrfs_crit(fs_info,
        "corrupt %s: root=%llu block=%llu slot=%d bg_start=%llu bg_len=%llu, %pV",
                   btrfs_header_level(eb) == 0 ? "leaf" : "node",
                   btrfs_header_owner(eb), btrfs_header_bytenr(eb), slot,
                   key.objectid, key.offset, &vaf);
        va_end(args);
}

static int check_block_group_item(struct extent_buffer *leaf,
                                  struct btrfs_key *key, int slot)
{
        struct btrfs_block_group_item bgi;
        u32 item_size = btrfs_item_size(leaf, slot);
        u64 flags;
        u64 type;

        /*
         * Here we don't really care about alignment since extent allocator
         * can handle it.  We care more about the size.
         */
        if (unlikely(key->offset == 0)) {
                block_group_err(leaf, slot,
                                "invalid block group size 0");
                return -EUCLEAN;
        }

        if (unlikely(item_size != sizeof(bgi))) {
                block_group_err(leaf, slot,
                                "invalid item size, have %u expect %zu",
                                item_size, sizeof(bgi));
                return -EUCLEAN;
        }

        read_extent_buffer(leaf, &bgi, btrfs_item_ptr_offset(leaf, slot),
                           sizeof(bgi));
        if (unlikely(btrfs_stack_block_group_chunk_objectid(&bgi) !=
                     BTRFS_FIRST_CHUNK_TREE_OBJECTID)) {
                block_group_err(leaf, slot,
                "invalid block group chunk objectid, have %llu expect %llu",
                                btrfs_stack_block_group_chunk_objectid(&bgi),
                                BTRFS_FIRST_CHUNK_TREE_OBJECTID);
                return -EUCLEAN;
        }

        if (unlikely(btrfs_stack_block_group_used(&bgi) > key->offset)) {
                block_group_err(leaf, slot,
                        "invalid block group used, have %llu expect [0, %llu)",
                                btrfs_stack_block_group_used(&bgi), key->offset);
                return -EUCLEAN;
        }

        flags = btrfs_stack_block_group_flags(&bgi);
        if (unlikely(hweight64(flags & BTRFS_BLOCK_GROUP_PROFILE_MASK) > 1)) {
                block_group_err(leaf, slot,
"invalid profile flags, have 0x%llx (%lu bits set) expect no more than 1 bit set",
                                flags & BTRFS_BLOCK_GROUP_PROFILE_MASK,
                                hweight64(flags & BTRFS_BLOCK_GROUP_PROFILE_MASK));
                return -EUCLEAN;
        }

        type = flags & BTRFS_BLOCK_GROUP_TYPE_MASK;
        if (unlikely(type != BTRFS_BLOCK_GROUP_DATA &&
                     type != BTRFS_BLOCK_GROUP_METADATA &&
                     type != BTRFS_BLOCK_GROUP_SYSTEM &&
                     type != (BTRFS_BLOCK_GROUP_METADATA |
                              BTRFS_BLOCK_GROUP_DATA))) {
                block_group_err(leaf, slot,
"invalid type, have 0x%llx (%lu bits set) expect either 0x%llx, 0x%llx, 0x%llx or 0x%llx",
                                type, hweight64(type),
                                BTRFS_BLOCK_GROUP_DATA, BTRFS_BLOCK_GROUP_METADATA,
                                BTRFS_BLOCK_GROUP_SYSTEM,
                                BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA);
                return -EUCLEAN;
        }
        return 0;
}

__printf(4, 5)
__cold
static void chunk_err(const struct extent_buffer *leaf,
                      const struct btrfs_chunk *chunk, u64 logical,
                      const char *fmt, ...)
{
        const struct btrfs_fs_info *fs_info = leaf->fs_info;
        bool is_sb;
        struct va_format vaf;
        va_list args;
        int i;
        int slot = -1;

        /* Only the superblock eb is able to have such a small offset */
        is_sb = (leaf->start == BTRFS_SUPER_INFO_OFFSET);

        if (!is_sb) {
                /*
                 * Get the slot number by iterating through all slots; this
                 * provides better readability.
                 */
                for (i = 0; i < btrfs_header_nritems(leaf); i++) {
                        if (btrfs_item_ptr_offset(leaf, i) ==
                            (unsigned long)chunk) {
                                slot = i;
                                break;
                        }
                }
        }
        va_start(args, fmt);
        vaf.fmt = fmt;
        vaf.va = &args;

        if (is_sb)
                btrfs_crit(fs_info,
                "corrupt superblock syschunk array: chunk_start=%llu, %pV",
                           logical, &vaf);
        else
                btrfs_crit(fs_info,
                "corrupt leaf: root=%llu block=%llu slot=%d chunk_start=%llu, %pV",
                           BTRFS_CHUNK_TREE_OBJECTID, leaf->start, slot,
                           logical, &vaf);
        va_end(args);
}

/*
 * The common chunk check which could also work on super block sys chunk array.
 *
 * Return -EUCLEAN if anything is corrupted.
 * Return 0 if everything is OK.
 */
int btrfs_check_chunk_valid(struct extent_buffer *leaf,
                            struct btrfs_chunk *chunk, u64 logical)
{
        struct btrfs_fs_info *fs_info = leaf->fs_info;
        u64 length;
        u64 chunk_end;
        u64 stripe_len;
        u16 num_stripes;
        u16 sub_stripes;
        u64 type;
        u64 features;
        bool mixed = false;
        int raid_index;
        int nparity;
        int ncopies;

        length = btrfs_chunk_length(leaf, chunk);
        stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
        num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
        sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
        type = btrfs_chunk_type(leaf, chunk);
        raid_index = btrfs_bg_flags_to_raid_index(type);
        ncopies = btrfs_raid_array[raid_index].ncopies;
        nparity = btrfs_raid_array[raid_index].nparity;

        if (unlikely(!num_stripes)) {
                chunk_err(leaf, chunk, logical,
                          "invalid chunk num_stripes, have %u", num_stripes);
                return -EUCLEAN;
        }
        if (unlikely(num_stripes < ncopies)) {
                chunk_err(leaf, chunk, logical,
                          "invalid chunk num_stripes < ncopies, have %u < %d",
                          num_stripes, ncopies);
                return -EUCLEAN;
        }
        if (unlikely(nparity && num_stripes == nparity)) {
                chunk_err(leaf, chunk, logical,
                          "invalid chunk num_stripes == nparity, have %u == %d",
                          num_stripes, nparity);
                return -EUCLEAN;
        }
        if (unlikely(!IS_ALIGNED(logical, fs_info->sectorsize))) {
                chunk_err(leaf, chunk, logical,
                "invalid chunk logical, have %llu should be aligned to %u",
                          logical, fs_info->sectorsize);
                return -EUCLEAN;
        }
        if (unlikely(btrfs_chunk_sector_size(leaf, chunk) != fs_info->sectorsize)) {
                chunk_err(leaf, chunk, logical,
                          "invalid chunk sectorsize, have %u expect %u",
                          btrfs_chunk_sector_size(leaf, chunk),
                          fs_info->sectorsize);
                return -EUCLEAN;
        }
        if (unlikely(!length || !IS_ALIGNED(length, fs_info->sectorsize))) {
                chunk_err(leaf, chunk, logical,
                          "invalid chunk length, have %llu", length);
                return -EUCLEAN;
        }
        if (unlikely(check_add_overflow(logical, length, &chunk_end))) {
                chunk_err(leaf, chunk, logical,
"invalid chunk logical start and length, have logical start %llu length %llu",
                          logical, length);
                return -EUCLEAN;
        }
        if (unlikely(!is_power_of_2(stripe_len) || stripe_len != BTRFS_STRIPE_LEN)) {
                chunk_err(leaf, chunk, logical,
                          "invalid chunk stripe length: %llu",
                          stripe_len);
                return -EUCLEAN;
        }
        if (unlikely(type & ~(BTRFS_BLOCK_GROUP_TYPE_MASK |
                              BTRFS_BLOCK_GROUP_PROFILE_MASK))) {
                chunk_err(leaf, chunk, logical,
                          "unrecognized chunk type: 0x%llx",
                          ~(BTRFS_BLOCK_GROUP_TYPE_MASK |
                            BTRFS_BLOCK_GROUP_PROFILE_MASK) &
                          btrfs_chunk_type(leaf, chunk));
                return -EUCLEAN;
        }

        if (unlikely(!has_single_bit_set(type & BTRFS_BLOCK_GROUP_PROFILE_MASK) &&
                     (type & BTRFS_BLOCK_GROUP_PROFILE_MASK) != 0)) {
                chunk_err(leaf, chunk, logical,
                "invalid chunk profile flag: 0x%llx, expect 0 or 1 bit set",
                          type & BTRFS_BLOCK_GROUP_PROFILE_MASK);
                return -EUCLEAN;
        }
        if (unlikely((type & BTRFS_BLOCK_GROUP_TYPE_MASK) == 0)) {
                chunk_err(leaf, chunk, logical,
        "missing chunk type flag, have 0x%llx one bit must be set in 0x%llx",
                          type, BTRFS_BLOCK_GROUP_TYPE_MASK);
                return -EUCLEAN;
        }

        if (unlikely((type & BTRFS_BLOCK_GROUP_SYSTEM) &&
                     (type & (BTRFS_BLOCK_GROUP_METADATA |
                              BTRFS_BLOCK_GROUP_DATA)))) {
                chunk_err(leaf, chunk, logical,
                          "system chunk with data or metadata type: 0x%llx",
                          type);
                return -EUCLEAN;
        }

        features = btrfs_super_incompat_flags(fs_info->super_copy);
        if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
                mixed = true;

        if (!mixed) {
                if (unlikely((type & BTRFS_BLOCK_GROUP_METADATA) &&
                             (type & BTRFS_BLOCK_GROUP_DATA))) {
                        chunk_err(leaf, chunk, logical,
                        "mixed chunk type in non-mixed mode: 0x%llx", type);
                        return -EUCLEAN;
                }
        }

        if (unlikely((type & BTRFS_BLOCK_GROUP_RAID10 &&
                      sub_stripes != btrfs_raid_array[BTRFS_RAID_RAID10].sub_stripes) ||
                     (type & BTRFS_BLOCK_GROUP_RAID1 &&
                      num_stripes != btrfs_raid_array[BTRFS_RAID_RAID1].devs_min) ||
                     (type & BTRFS_BLOCK_GROUP_RAID1C3 &&
                      num_stripes != btrfs_raid_array[BTRFS_RAID_RAID1C3].devs_min) ||
                     (type & BTRFS_BLOCK_GROUP_RAID1C4 &&
                      num_stripes != btrfs_raid_array[BTRFS_RAID_RAID1C4].devs_min) ||
                     (type & BTRFS_BLOCK_GROUP_RAID5 &&
                      num_stripes < btrfs_raid_array[BTRFS_RAID_RAID5].devs_min) ||
                     (type & BTRFS_BLOCK_GROUP_RAID6 &&
                      num_stripes < btrfs_raid_array[BTRFS_RAID_RAID6].devs_min) ||
                     (type & BTRFS_BLOCK_GROUP_DUP &&
                      num_stripes != btrfs_raid_array[BTRFS_RAID_DUP].dev_stripes) ||
                     ((type & BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0 &&
                      num_stripes != btrfs_raid_array[BTRFS_RAID_SINGLE].dev_stripes))) {
                chunk_err(leaf, chunk, logical,
                          "invalid num_stripes:sub_stripes %u:%u for profile %llu",
                          num_stripes, sub_stripes,
                          type & BTRFS_BLOCK_GROUP_PROFILE_MASK);
                return -EUCLEAN;
        }

        return 0;
}

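/*
 * For example (per btrfs_raid_array): RAID1 requires exactly 2 stripes and
 * RAID10 requires sub_stripes == 2, while RAID5/RAID6 only have the lower
 * bounds devs_min of 2 and 3 respectively, so e.g. a RAID1 chunk claiming
 * 3 stripes is rejected above.
 */
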
/*
 * Enhanced version of chunk item checker.
 *
 * The common btrfs_check_chunk_valid() doesn't check item size since it needs
 * to work on super block sys_chunk_array which doesn't have full item ptr.
 */
static int check_leaf_chunk_item(struct extent_buffer *leaf,
                                 struct btrfs_chunk *chunk,
                                 struct btrfs_key *key, int slot)
{
        int num_stripes;

        if (unlikely(btrfs_item_size(leaf, slot) < sizeof(struct btrfs_chunk))) {
                chunk_err(leaf, chunk, key->offset,
                          "invalid chunk item size: have %u expect [%zu, %u)",
                          btrfs_item_size(leaf, slot),
                          sizeof(struct btrfs_chunk),
                          BTRFS_LEAF_DATA_SIZE(leaf->fs_info));
                return -EUCLEAN;
        }

        num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
        /* Let btrfs_check_chunk_valid() handle this error type */
        if (num_stripes == 0)
                goto out;

        if (unlikely(btrfs_chunk_item_size(num_stripes) !=
                     btrfs_item_size(leaf, slot))) {
                chunk_err(leaf, chunk, key->offset,
                          "invalid chunk item size: have %u expect %lu",
                          btrfs_item_size(leaf, slot),
                          btrfs_chunk_item_size(num_stripes));
                return -EUCLEAN;
        }
out:
        return btrfs_check_chunk_valid(leaf, chunk, key->offset);
}

David Sterba | 5617ed8 | 2019-03-20 16:22:58 +0100 | [diff] [blame] | 941 | __printf(3, 4) |
Qu Wenruo | ab4ba2e | 2019-03-08 14:20:03 +0800 | [diff] [blame] | 942 | __cold |
David Sterba | 5617ed8 | 2019-03-20 16:22:58 +0100 | [diff] [blame] | 943 | static void dev_item_err(const struct extent_buffer *eb, int slot, |
Qu Wenruo | ab4ba2e | 2019-03-08 14:20:03 +0800 | [diff] [blame] | 944 | const char *fmt, ...) |
| 945 | { |
| 946 | struct btrfs_key key; |
| 947 | struct va_format vaf; |
| 948 | va_list args; |
| 949 | |
| 950 | btrfs_item_key_to_cpu(eb, &key, slot); |
| 951 | va_start(args, fmt); |
| 952 | |
| 953 | vaf.fmt = fmt; |
| 954 | vaf.va = &args; |
| 955 | |
David Sterba | 5617ed8 | 2019-03-20 16:22:58 +0100 | [diff] [blame] | 956 | btrfs_crit(eb->fs_info, |
Qu Wenruo | ab4ba2e | 2019-03-08 14:20:03 +0800 | [diff] [blame] | 957 | "corrupt %s: root=%llu block=%llu slot=%d devid=%llu %pV", |
| 958 | btrfs_header_level(eb) == 0 ? "leaf" : "node", |
| 959 | btrfs_header_owner(eb), btrfs_header_bytenr(eb), slot, |
| 960 | key.objectid, &vaf); |
| 961 | va_end(args); |
| 962 | } |
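| | /*
| |  * A sample message produced by the helper above, with made-up values
| |  * (BTRFS_DEV_ITEMS_OBJECTID is 1):
| |  *
| |  *	corrupt leaf: root=3 block=20971520 slot=0 devid=1 \
| |  *	invalid objectid: has=42 expect=1
| |  */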
| 963 | |
David Sterba | 412a231 | 2019-03-20 16:22:58 +0100 | [diff] [blame] | 964 | static int check_dev_item(struct extent_buffer *leaf, |
Qu Wenruo | ab4ba2e | 2019-03-08 14:20:03 +0800 | [diff] [blame] | 965 | struct btrfs_key *key, int slot) |
| 966 | { |
| 967 | struct btrfs_dev_item *ditem; |
Qu Wenruo | ab4ba2e | 2019-03-08 14:20:03 +0800 | [diff] [blame] | 968 | |
David Sterba | c7c01a4 | 2020-11-04 16:12:45 +0100 | [diff] [blame] | 969 | if (unlikely(key->objectid != BTRFS_DEV_ITEMS_OBJECTID)) { |
David Sterba | 5617ed8 | 2019-03-20 16:22:58 +0100 | [diff] [blame] | 970 | dev_item_err(leaf, slot, |
Qu Wenruo | ab4ba2e | 2019-03-08 14:20:03 +0800 | [diff] [blame] | 971 | "invalid objectid: has=%llu expect=%llu", |
| 972 | key->objectid, BTRFS_DEV_ITEMS_OBJECTID); |
| 973 | return -EUCLEAN; |
| 974 | } |
Qu Wenruo | ab4ba2e | 2019-03-08 14:20:03 +0800 | [diff] [blame] | 975 | ditem = btrfs_item_ptr(leaf, slot, struct btrfs_dev_item); |
David Sterba | c7c01a4 | 2020-11-04 16:12:45 +0100 | [diff] [blame] | 976 | if (unlikely(btrfs_device_id(leaf, ditem) != key->offset)) { |
David Sterba | 5617ed8 | 2019-03-20 16:22:58 +0100 | [diff] [blame] | 977 | dev_item_err(leaf, slot, |
Qu Wenruo | ab4ba2e | 2019-03-08 14:20:03 +0800 | [diff] [blame] | 978 | "devid mismatch: key has=%llu item has=%llu", |
| 979 | key->offset, btrfs_device_id(leaf, ditem)); |
| 980 | return -EUCLEAN; |
| 981 | } |
| 982 | |
| 983 | /* |
| 984 |  * For device total_bytes, we don't have a reliable way to check it, as
| 985 |  * it can be 0 for device removal. The device size can only be validated
| 986 |  * by the dev extent checks.
| 987 | */ |
David Sterba | c7c01a4 | 2020-11-04 16:12:45 +0100 | [diff] [blame] | 988 | if (unlikely(btrfs_device_bytes_used(leaf, ditem) > |
| 989 | btrfs_device_total_bytes(leaf, ditem))) { |
David Sterba | 5617ed8 | 2019-03-20 16:22:58 +0100 | [diff] [blame] | 990 | dev_item_err(leaf, slot, |
Qu Wenruo | ab4ba2e | 2019-03-08 14:20:03 +0800 | [diff] [blame] | 991 | "invalid bytes used: have %llu expect [0, %llu]", |
| 992 | btrfs_device_bytes_used(leaf, ditem), |
| 993 | btrfs_device_total_bytes(leaf, ditem)); |
| 994 | return -EUCLEAN; |
| 995 | } |
| 996 | /* |
| 997 |  * Remaining members like io_align/type/gen/dev_group aren't really
| 998 |  * utilized, so skip checking them for now.
| 999 | */ |
| 1000 | return 0; |
| 1001 | } |
| 1002 | |
David Sterba | 39e57f4 | 2019-03-20 16:22:58 +0100 | [diff] [blame] | 1003 | static int check_inode_item(struct extent_buffer *leaf, |
Qu Wenruo | 496245c | 2019-03-13 14:31:35 +0800 | [diff] [blame] | 1004 | struct btrfs_key *key, int slot) |
| 1005 | { |
David Sterba | 39e57f4 | 2019-03-20 16:22:58 +0100 | [diff] [blame] | 1006 | struct btrfs_fs_info *fs_info = leaf->fs_info; |
Qu Wenruo | 496245c | 2019-03-13 14:31:35 +0800 | [diff] [blame] | 1007 | struct btrfs_inode_item *iitem; |
| 1008 | u64 super_gen = btrfs_super_generation(fs_info->super_copy); |
| 1009 | u32 valid_mask = (S_IFMT | S_ISUID | S_ISGID | S_ISVTX | 0777); |
Su Yue | 0c98294 | 2022-01-21 17:33:34 +0800 | [diff] [blame^] | 1010 | const u32 item_size = btrfs_item_size(leaf, slot); |
Qu Wenruo | 496245c | 2019-03-13 14:31:35 +0800 | [diff] [blame] | 1011 | u32 mode; |
Qu Wenruo | c23c77b | 2019-12-09 18:54:33 +0800 | [diff] [blame] | 1012 | int ret; |
Boris Burkov | 77eea05 | 2021-06-30 13:01:48 -0700 | [diff] [blame] | 1013 | u32 flags; |
| 1014 | u32 ro_flags; |
Qu Wenruo | 496245c | 2019-03-13 14:31:35 +0800 | [diff] [blame] | 1015 | |
Qu Wenruo | c23c77b | 2019-12-09 18:54:33 +0800 | [diff] [blame] | 1016 | ret = check_inode_key(leaf, key, slot); |
David Sterba | c7c01a4 | 2020-11-04 16:12:45 +0100 | [diff] [blame] | 1017 | if (unlikely(ret < 0)) |
Qu Wenruo | c23c77b | 2019-12-09 18:54:33 +0800 | [diff] [blame] | 1018 | return ret; |
| 1019 | |
Su Yue | 0c98294 | 2022-01-21 17:33:34 +0800 | [diff] [blame^] | 1020 | if (unlikely(item_size != sizeof(*iitem))) { |
| 1021 | generic_err(leaf, slot, "invalid item size: has %u expect %zu", |
| 1022 | item_size, sizeof(*iitem)); |
| 1023 | return -EUCLEAN; |
| 1024 | } |
| 1025 | |
Qu Wenruo | 496245c | 2019-03-13 14:31:35 +0800 | [diff] [blame] | 1026 | iitem = btrfs_item_ptr(leaf, slot, struct btrfs_inode_item); |
| 1027 | |
| 1028 | /* Here we use super block generation + 1 to handle log tree */ |
David Sterba | c7c01a4 | 2020-11-04 16:12:45 +0100 | [diff] [blame] | 1029 | if (unlikely(btrfs_inode_generation(leaf, iitem) > super_gen + 1)) { |
Qu Wenruo | c3053eb | 2019-12-09 18:54:32 +0800 | [diff] [blame] | 1030 | inode_item_err(leaf, slot, |
Qu Wenruo | 496245c | 2019-03-13 14:31:35 +0800 | [diff] [blame] | 1031 | "invalid inode generation: has %llu expect (0, %llu]", |
| 1032 | btrfs_inode_generation(leaf, iitem), |
| 1033 | super_gen + 1); |
| 1034 | return -EUCLEAN; |
| 1035 | } |
| 1036 | /* Note for ROOT_TREE_DIR_ITEM, mkfs could set its transid 0 */ |
David Sterba | c7c01a4 | 2020-11-04 16:12:45 +0100 | [diff] [blame] | 1037 | if (unlikely(btrfs_inode_transid(leaf, iitem) > super_gen + 1)) { |
Qu Wenruo | c3053eb | 2019-12-09 18:54:32 +0800 | [diff] [blame] | 1038 | inode_item_err(leaf, slot, |
Qu Wenruo | f96d696 | 2020-08-25 21:42:51 +0800 | [diff] [blame] | 1039 | "invalid inode transid: has %llu expect [0, %llu]", |
Qu Wenruo | 496245c | 2019-03-13 14:31:35 +0800 | [diff] [blame] | 1040 | btrfs_inode_transid(leaf, iitem), super_gen + 1); |
| 1041 | return -EUCLEAN; |
| 1042 | } |
| 1043 | |
| 1044 | /* |
| 1045 |  * For size and nbytes it's better not to be too strict, as for a dir
| 1046 |  * item its size/nbytes can easily get out of sync without affecting
| 1047 |  * anything in the fs. So the check is skipped here.
| 1048 | */ |
| 1049 | mode = btrfs_inode_mode(leaf, iitem); |
David Sterba | c7c01a4 | 2020-11-04 16:12:45 +0100 | [diff] [blame] | 1050 | if (unlikely(mode & ~valid_mask)) { |
Qu Wenruo | c3053eb | 2019-12-09 18:54:32 +0800 | [diff] [blame] | 1051 | inode_item_err(leaf, slot, |
Qu Wenruo | 496245c | 2019-03-13 14:31:35 +0800 | [diff] [blame] | 1052 | "unknown mode bit detected: 0x%x", |
| 1053 | mode & ~valid_mask); |
| 1054 | return -EUCLEAN; |
| 1055 | } |
| 1056 | |
| 1057 | /* |
David Sterba | c149916 | 2019-10-01 19:44:42 +0200 | [diff] [blame] | 1058 | * S_IFMT is not bit mapped so we can't completely rely on |
| 1059 | * is_power_of_2/has_single_bit_set, but it can save us from checking |
| 1060 |  * FIFO/CHR/DIR/REG. We only need to check BLK, LNK and SOCK explicitly.
Qu Wenruo | 496245c | 2019-03-13 14:31:35 +0800 | [diff] [blame] | 1061 | */ |
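| | /*
| |  * For reference, the S_IFMT encodings from <linux/stat.h>:
| |  *
| |  *	S_IFIFO  0010000  single bit
| |  *	S_IFCHR  0020000  single bit
| |  *	S_IFDIR  0040000  single bit
| |  *	S_IFREG  0100000  single bit
| |  *	S_IFBLK  0060000  == S_IFCHR | S_IFDIR
| |  *	S_IFLNK  0120000  == S_IFREG | S_IFCHR
| |  *	S_IFSOCK 0140000  == S_IFREG | S_IFDIR
| |  *
| |  * Only the last three survive the single-bit filter and need the
| |  * explicit check below.
| |  */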
David Sterba | c149916 | 2019-10-01 19:44:42 +0200 | [diff] [blame] | 1062 | if (!has_single_bit_set(mode & S_IFMT)) { |
David Sterba | c7c01a4 | 2020-11-04 16:12:45 +0100 | [diff] [blame] | 1063 | if (unlikely(!S_ISLNK(mode) && !S_ISBLK(mode) && !S_ISSOCK(mode))) { |
Qu Wenruo | c3053eb | 2019-12-09 18:54:32 +0800 | [diff] [blame] | 1064 | inode_item_err(leaf, slot, |
Qu Wenruo | 496245c | 2019-03-13 14:31:35 +0800 | [diff] [blame] | 1065 | "invalid mode: has 0%o expect valid S_IF* bit(s)", |
| 1066 | mode & S_IFMT); |
| 1067 | return -EUCLEAN; |
| 1068 | } |
| 1069 | } |
David Sterba | c7c01a4 | 2020-11-04 16:12:45 +0100 | [diff] [blame] | 1070 | if (unlikely(S_ISDIR(mode) && btrfs_inode_nlink(leaf, iitem) > 1)) { |
Qu Wenruo | c3053eb | 2019-12-09 18:54:32 +0800 | [diff] [blame] | 1071 | inode_item_err(leaf, slot, |
Qu Wenruo | 496245c | 2019-03-13 14:31:35 +0800 | [diff] [blame] | 1072 | "invalid nlink: has %u expect no more than 1 for dir", |
| 1073 | btrfs_inode_nlink(leaf, iitem)); |
| 1074 | return -EUCLEAN; |
| 1075 | } |
Boris Burkov | 77eea05 | 2021-06-30 13:01:48 -0700 | [diff] [blame] | 1076 | btrfs_inode_split_flags(btrfs_inode_flags(leaf, iitem), &flags, &ro_flags); |
| 1077 | if (unlikely(flags & ~BTRFS_INODE_FLAG_MASK)) { |
Qu Wenruo | c3053eb | 2019-12-09 18:54:32 +0800 | [diff] [blame] | 1078 | inode_item_err(leaf, slot, |
Boris Burkov | 77eea05 | 2021-06-30 13:01:48 -0700 | [diff] [blame] | 1079 | "unknown incompat flags detected: 0x%x", flags); |
| 1080 | return -EUCLEAN; |
| 1081 | } |
| 1082 | if (unlikely(!sb_rdonly(fs_info->sb) && |
| 1083 | (ro_flags & ~BTRFS_INODE_RO_FLAG_MASK))) { |
| 1084 | inode_item_err(leaf, slot, |
| 1085 | "unknown ro-compat flags detected on writeable mount: 0x%x", |
| 1086 | ro_flags); |
Qu Wenruo | 496245c | 2019-03-13 14:31:35 +0800 | [diff] [blame] | 1087 | return -EUCLEAN; |
| 1088 | } |
| 1089 | return 0; |
| 1090 | } |
| 1091 | |
Qu Wenruo | 259ee77 | 2019-07-16 17:00:34 +0800 | [diff] [blame] | 1092 | static int check_root_item(struct extent_buffer *leaf, struct btrfs_key *key, |
| 1093 | int slot) |
| 1094 | { |
| 1095 | struct btrfs_fs_info *fs_info = leaf->fs_info; |
Qu Wenruo | 1465af1 | 2020-09-22 10:37:01 +0800 | [diff] [blame] | 1096 | struct btrfs_root_item ri = { 0 }; |
Qu Wenruo | 259ee77 | 2019-07-16 17:00:34 +0800 | [diff] [blame] | 1097 | const u64 valid_root_flags = BTRFS_ROOT_SUBVOL_RDONLY | |
| 1098 | BTRFS_ROOT_SUBVOL_DEAD; |
Qu Wenruo | 57a0e67 | 2019-12-09 18:54:34 +0800 | [diff] [blame] | 1099 | int ret; |
Qu Wenruo | 259ee77 | 2019-07-16 17:00:34 +0800 | [diff] [blame] | 1100 | |
Qu Wenruo | 57a0e67 | 2019-12-09 18:54:34 +0800 | [diff] [blame] | 1101 | ret = check_root_key(leaf, key, slot); |
David Sterba | c7c01a4 | 2020-11-04 16:12:45 +0100 | [diff] [blame] | 1102 | if (unlikely(ret < 0)) |
Qu Wenruo | 57a0e67 | 2019-12-09 18:54:34 +0800 | [diff] [blame] | 1103 | return ret; |
Qu Wenruo | 259ee77 | 2019-07-16 17:00:34 +0800 | [diff] [blame] | 1104 | |
Josef Bacik | 3212fa1 | 2021-10-21 14:58:35 -0400 | [diff] [blame] | 1105 | if (unlikely(btrfs_item_size(leaf, slot) != sizeof(ri) && |
| 1106 | btrfs_item_size(leaf, slot) != |
David Sterba | c7c01a4 | 2020-11-04 16:12:45 +0100 | [diff] [blame] | 1107 | btrfs_legacy_root_item_size())) { |
Qu Wenruo | 259ee77 | 2019-07-16 17:00:34 +0800 | [diff] [blame] | 1108 | generic_err(leaf, slot, |
Qu Wenruo | 1465af1 | 2020-09-22 10:37:01 +0800 | [diff] [blame] | 1109 | "invalid root item size, have %u expect %zu or %u", |
Josef Bacik | 3212fa1 | 2021-10-21 14:58:35 -0400 | [diff] [blame] | 1110 | btrfs_item_size(leaf, slot), sizeof(ri), |
Qu Wenruo | 1465af1 | 2020-09-22 10:37:01 +0800 | [diff] [blame] | 1111 | btrfs_legacy_root_item_size()); |
Daniel Xu | 1a49a97 | 2020-11-12 17:55:06 -0800 | [diff] [blame] | 1112 | return -EUCLEAN; |
Qu Wenruo | 259ee77 | 2019-07-16 17:00:34 +0800 | [diff] [blame] | 1113 | } |
| 1114 | |
Qu Wenruo | 1465af1 | 2020-09-22 10:37:01 +0800 | [diff] [blame] | 1115 | /* |
| 1116 | * For legacy root item, the members starting at generation_v2 will be |
| 1117 | * all filled with 0. |
| 1118 |  * And since we allow generation_v2 to be 0, it will still pass the check.
| 1119 | */ |
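| | /*
| |  * Assuming the helper's definition, the legacy size is simply everything
| |  * before the v2 members:
| |  *
| |  *	btrfs_legacy_root_item_size() ==
| |  *		offsetof(struct btrfs_root_item, generation_v2)
| |  */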
Qu Wenruo | 259ee77 | 2019-07-16 17:00:34 +0800 | [diff] [blame] | 1120 | read_extent_buffer(leaf, &ri, btrfs_item_ptr_offset(leaf, slot), |
Josef Bacik | 3212fa1 | 2021-10-21 14:58:35 -0400 | [diff] [blame] | 1121 | btrfs_item_size(leaf, slot)); |
Qu Wenruo | 259ee77 | 2019-07-16 17:00:34 +0800 | [diff] [blame] | 1122 | |
| 1123 | /* Generation related */ |
David Sterba | c7c01a4 | 2020-11-04 16:12:45 +0100 | [diff] [blame] | 1124 | if (unlikely(btrfs_root_generation(&ri) > |
| 1125 | btrfs_super_generation(fs_info->super_copy) + 1)) { |
Qu Wenruo | 259ee77 | 2019-07-16 17:00:34 +0800 | [diff] [blame] | 1126 | generic_err(leaf, slot, |
| 1127 | "invalid root generation, have %llu expect (0, %llu]", |
| 1128 | btrfs_root_generation(&ri), |
| 1129 | btrfs_super_generation(fs_info->super_copy) + 1); |
| 1130 | return -EUCLEAN; |
| 1131 | } |
David Sterba | c7c01a4 | 2020-11-04 16:12:45 +0100 | [diff] [blame] | 1132 | if (unlikely(btrfs_root_generation_v2(&ri) > |
| 1133 | btrfs_super_generation(fs_info->super_copy) + 1)) { |
Qu Wenruo | 259ee77 | 2019-07-16 17:00:34 +0800 | [diff] [blame] | 1134 | generic_err(leaf, slot, |
| 1135 | "invalid root v2 generation, have %llu expect (0, %llu]", |
| 1136 | btrfs_root_generation_v2(&ri), |
| 1137 | btrfs_super_generation(fs_info->super_copy) + 1); |
| 1138 | return -EUCLEAN; |
| 1139 | } |
David Sterba | c7c01a4 | 2020-11-04 16:12:45 +0100 | [diff] [blame] | 1140 | if (unlikely(btrfs_root_last_snapshot(&ri) > |
| 1141 | btrfs_super_generation(fs_info->super_copy) + 1)) { |
Qu Wenruo | 259ee77 | 2019-07-16 17:00:34 +0800 | [diff] [blame] | 1142 | generic_err(leaf, slot, |
| 1143 | "invalid root last_snapshot, have %llu expect (0, %llu]", |
| 1144 | btrfs_root_last_snapshot(&ri), |
| 1145 | btrfs_super_generation(fs_info->super_copy) + 1); |
| 1146 | return -EUCLEAN; |
| 1147 | } |
| 1148 | |
| 1149 | /* Alignment and level check */ |
David Sterba | c7c01a4 | 2020-11-04 16:12:45 +0100 | [diff] [blame] | 1150 | if (unlikely(!IS_ALIGNED(btrfs_root_bytenr(&ri), fs_info->sectorsize))) { |
Qu Wenruo | 259ee77 | 2019-07-16 17:00:34 +0800 | [diff] [blame] | 1151 | generic_err(leaf, slot, |
| 1152 | "invalid root bytenr, have %llu expect to be aligned to %u", |
| 1153 | btrfs_root_bytenr(&ri), fs_info->sectorsize); |
| 1154 | return -EUCLEAN; |
| 1155 | } |
David Sterba | c7c01a4 | 2020-11-04 16:12:45 +0100 | [diff] [blame] | 1156 | if (unlikely(btrfs_root_level(&ri) >= BTRFS_MAX_LEVEL)) { |
Qu Wenruo | 259ee77 | 2019-07-16 17:00:34 +0800 | [diff] [blame] | 1157 | generic_err(leaf, slot, |
| 1158 | "invalid root level, have %u expect [0, %u]", |
| 1159 | btrfs_root_level(&ri), BTRFS_MAX_LEVEL - 1); |
| 1160 | return -EUCLEAN; |
| 1161 | } |
David Sterba | c7c01a4 | 2020-11-04 16:12:45 +0100 | [diff] [blame] | 1162 | if (unlikely(btrfs_root_drop_level(&ri) >= BTRFS_MAX_LEVEL)) { |
Qu Wenruo | 259ee77 | 2019-07-16 17:00:34 +0800 | [diff] [blame] | 1163 | generic_err(leaf, slot, |
| 1164 | "invalid root drop level, have %u expect [0, %u]",
David Sterba | c842268 | 2020-09-15 21:44:52 +0200 | [diff] [blame] | 1165 | btrfs_root_drop_level(&ri), BTRFS_MAX_LEVEL - 1); |
Qu Wenruo | 259ee77 | 2019-07-16 17:00:34 +0800 | [diff] [blame] | 1166 | return -EUCLEAN; |
| 1167 | } |
| 1168 | |
| 1169 | /* Flags check */ |
David Sterba | c7c01a4 | 2020-11-04 16:12:45 +0100 | [diff] [blame] | 1170 | if (unlikely(btrfs_root_flags(&ri) & ~valid_root_flags)) { |
Qu Wenruo | 259ee77 | 2019-07-16 17:00:34 +0800 | [diff] [blame] | 1171 | generic_err(leaf, slot, |
| 1172 | "invalid root flags, have 0x%llx expect mask 0x%llx", |
| 1173 | btrfs_root_flags(&ri), valid_root_flags); |
| 1174 | return -EUCLEAN; |
| 1175 | } |
| 1176 | return 0; |
| 1177 | } |
| 1178 | |
Qu Wenruo | f82d1c7c | 2019-08-09 09:24:22 +0800 | [diff] [blame] | 1179 | __printf(3,4) |
| 1180 | __cold |
| 1181 | static void extent_err(const struct extent_buffer *eb, int slot, |
| 1182 | const char *fmt, ...) |
| 1183 | { |
| 1184 | struct btrfs_key key; |
| 1185 | struct va_format vaf; |
| 1186 | va_list args; |
| 1187 | u64 bytenr; |
| 1188 | u64 len; |
| 1189 | |
| 1190 | btrfs_item_key_to_cpu(eb, &key, slot); |
| 1191 | bytenr = key.objectid; |
Qu Wenruo | e2406a6 | 2019-08-09 09:24:23 +0800 | [diff] [blame] | 1192 | if (key.type == BTRFS_METADATA_ITEM_KEY || |
| 1193 | key.type == BTRFS_TREE_BLOCK_REF_KEY || |
| 1194 | key.type == BTRFS_SHARED_BLOCK_REF_KEY) |
Qu Wenruo | f82d1c7c | 2019-08-09 09:24:22 +0800 | [diff] [blame] | 1195 | len = eb->fs_info->nodesize; |
| 1196 | else |
| 1197 | len = key.offset; |
| 1198 | va_start(args, fmt); |
| 1199 | |
| 1200 | vaf.fmt = fmt; |
| 1201 | vaf.va = &args; |
| 1202 | |
| 1203 | btrfs_crit(eb->fs_info, |
| 1204 | "corrupt %s: block=%llu slot=%d extent bytenr=%llu len=%llu %pV", |
| 1205 | btrfs_header_level(eb) == 0 ? "leaf" : "node", |
| 1206 | eb->start, slot, bytenr, len, &vaf); |
| 1207 | va_end(args); |
| 1208 | } |
| 1209 | |
| 1210 | static int check_extent_item(struct extent_buffer *leaf, |
| 1211 | struct btrfs_key *key, int slot) |
| 1212 | { |
| 1213 | struct btrfs_fs_info *fs_info = leaf->fs_info; |
| 1214 | struct btrfs_extent_item *ei; |
| 1215 | bool is_tree_block = false; |
| 1216 | unsigned long ptr; /* Current pointer inside inline refs */ |
| 1217 | unsigned long end; /* Extent item end */ |
Josef Bacik | 3212fa1 | 2021-10-21 14:58:35 -0400 | [diff] [blame] | 1218 | const u32 item_size = btrfs_item_size(leaf, slot); |
Qu Wenruo | f82d1c7c | 2019-08-09 09:24:22 +0800 | [diff] [blame] | 1219 | u64 flags; |
| 1220 | u64 generation; |
| 1221 | u64 total_refs; /* Total refs in btrfs_extent_item */ |
| 1222 | u64 inline_refs = 0; /* found total inline refs */ |
| 1223 | |
David Sterba | c7c01a4 | 2020-11-04 16:12:45 +0100 | [diff] [blame] | 1224 | if (unlikely(key->type == BTRFS_METADATA_ITEM_KEY && |
| 1225 | !btrfs_fs_incompat(fs_info, SKINNY_METADATA))) { |
Qu Wenruo | f82d1c7c | 2019-08-09 09:24:22 +0800 | [diff] [blame] | 1226 | generic_err(leaf, slot, |
| 1227 | "invalid key type, METADATA_ITEM type invalid when SKINNY_METADATA feature disabled"); |
| 1228 | return -EUCLEAN; |
| 1229 | } |
| 1230 | /* key->objectid is the bytenr for both key types */ |
David Sterba | c7c01a4 | 2020-11-04 16:12:45 +0100 | [diff] [blame] | 1231 | if (unlikely(!IS_ALIGNED(key->objectid, fs_info->sectorsize))) { |
Qu Wenruo | f82d1c7c | 2019-08-09 09:24:22 +0800 | [diff] [blame] | 1232 | generic_err(leaf, slot, |
| 1233 | "invalid key objectid, have %llu expect to be aligned to %u", |
| 1234 | key->objectid, fs_info->sectorsize); |
| 1235 | return -EUCLEAN; |
| 1236 | } |
| 1237 | |
| 1238 | /* key->offset is tree level for METADATA_ITEM_KEY */ |
David Sterba | c7c01a4 | 2020-11-04 16:12:45 +0100 | [diff] [blame] | 1239 | if (unlikely(key->type == BTRFS_METADATA_ITEM_KEY && |
| 1240 | key->offset >= BTRFS_MAX_LEVEL)) { |
Qu Wenruo | f82d1c7c | 2019-08-09 09:24:22 +0800 | [diff] [blame] | 1241 | extent_err(leaf, slot, |
| 1242 | "invalid tree level, have %llu expect [0, %u]", |
| 1243 | key->offset, BTRFS_MAX_LEVEL - 1); |
| 1244 | return -EUCLEAN; |
| 1245 | } |
| 1246 | |
| 1247 | /* |
| 1248 | * EXTENT/METADATA_ITEM consists of: |
| 1249 | * 1) One btrfs_extent_item |
| 1250 | * Records the total refs, type and generation of the extent. |
| 1251 | * |
| 1252 | * 2) One btrfs_tree_block_info (for EXTENT_ITEM and tree backref only) |
| 1253 | * Records the first key and level of the tree block. |
| 1254 | * |
| 1255 |  * 3) Zero or more btrfs_extent_inline_ref(s)
| 1256 | * Each inline ref has one btrfs_extent_inline_ref shows: |
| 1257 | * 2.1) The ref type, one of the 4 |
| 1258 | * TREE_BLOCK_REF Tree block only |
| 1259 | * SHARED_BLOCK_REF Tree block only |
| 1260 | * EXTENT_DATA_REF Data only |
| 1261 | * SHARED_DATA_REF Data only |
| 1262 | * 2.2) Ref type specific data |
| 1263 | * Either using btrfs_extent_inline_ref::offset, or specific |
| 1264 | * data structure. |
| 1265 | */ |
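| | /*
| |  * A sketch of a typical METADATA_ITEM for a shared tree block with two
| |  * backrefs (layout assumed from the on-disk format described above):
| |  *
| |  *	struct btrfs_extent_item	refs=2 gen=G flags=TREE_BLOCK
| |  *	inline ref			type=TREE_BLOCK_REF offset=<root id>
| |  *	inline ref			type=SHARED_BLOCK_REF offset=<parent bytenr>
| |  *
| |  * Note there is no btrfs_tree_block_info for METADATA_ITEM, as the
| |  * level lives in key->offset instead.
| |  */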
David Sterba | c7c01a4 | 2020-11-04 16:12:45 +0100 | [diff] [blame] | 1266 | if (unlikely(item_size < sizeof(*ei))) { |
Qu Wenruo | f82d1c7c | 2019-08-09 09:24:22 +0800 | [diff] [blame] | 1267 | extent_err(leaf, slot, |
| 1268 | "invalid item size, have %u expect [%zu, %u)", |
| 1269 | item_size, sizeof(*ei), |
| 1270 | BTRFS_LEAF_DATA_SIZE(fs_info)); |
| 1271 | return -EUCLEAN; |
| 1272 | } |
| 1273 | end = item_size + btrfs_item_ptr_offset(leaf, slot); |
| 1274 | |
| 1275 | /* Checks against extent_item */ |
| 1276 | ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item); |
| 1277 | flags = btrfs_extent_flags(leaf, ei); |
| 1278 | total_refs = btrfs_extent_refs(leaf, ei); |
| 1279 | generation = btrfs_extent_generation(leaf, ei); |
David Sterba | c7c01a4 | 2020-11-04 16:12:45 +0100 | [diff] [blame] | 1280 | if (unlikely(generation > |
| 1281 | btrfs_super_generation(fs_info->super_copy) + 1)) { |
Qu Wenruo | f82d1c7c | 2019-08-09 09:24:22 +0800 | [diff] [blame] | 1282 | extent_err(leaf, slot, |
| 1283 | "invalid generation, have %llu expect (0, %llu]", |
| 1284 | generation, |
| 1285 | btrfs_super_generation(fs_info->super_copy) + 1); |
| 1286 | return -EUCLEAN; |
| 1287 | } |
David Sterba | c7c01a4 | 2020-11-04 16:12:45 +0100 | [diff] [blame] | 1288 | if (unlikely(!has_single_bit_set(flags & (BTRFS_EXTENT_FLAG_DATA | |
| 1289 | BTRFS_EXTENT_FLAG_TREE_BLOCK)))) { |
Qu Wenruo | f82d1c7c | 2019-08-09 09:24:22 +0800 | [diff] [blame] | 1290 | extent_err(leaf, slot, |
| 1291 | "invalid extent flag, have 0x%llx expect 1 bit set in 0x%llx", |
| 1292 | flags, BTRFS_EXTENT_FLAG_DATA | |
| 1293 | BTRFS_EXTENT_FLAG_TREE_BLOCK); |
| 1294 | return -EUCLEAN; |
| 1295 | } |
| 1296 | is_tree_block = !!(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK); |
| 1297 | if (is_tree_block) { |
David Sterba | c7c01a4 | 2020-11-04 16:12:45 +0100 | [diff] [blame] | 1298 | if (unlikely(key->type == BTRFS_EXTENT_ITEM_KEY && |
| 1299 | key->offset != fs_info->nodesize)) { |
Qu Wenruo | f82d1c7c | 2019-08-09 09:24:22 +0800 | [diff] [blame] | 1300 | extent_err(leaf, slot, |
| 1301 | "invalid extent length, have %llu expect %u", |
| 1302 | key->offset, fs_info->nodesize); |
| 1303 | return -EUCLEAN; |
| 1304 | } |
| 1305 | } else { |
David Sterba | c7c01a4 | 2020-11-04 16:12:45 +0100 | [diff] [blame] | 1306 | if (unlikely(key->type != BTRFS_EXTENT_ITEM_KEY)) { |
Qu Wenruo | f82d1c7c | 2019-08-09 09:24:22 +0800 | [diff] [blame] | 1307 | extent_err(leaf, slot, |
| 1308 | "invalid key type, have %u expect %u for data backref", |
| 1309 | key->type, BTRFS_EXTENT_ITEM_KEY); |
| 1310 | return -EUCLEAN; |
| 1311 | } |
David Sterba | c7c01a4 | 2020-11-04 16:12:45 +0100 | [diff] [blame] | 1312 | if (unlikely(!IS_ALIGNED(key->offset, fs_info->sectorsize))) { |
Qu Wenruo | f82d1c7c | 2019-08-09 09:24:22 +0800 | [diff] [blame] | 1313 | extent_err(leaf, slot, |
| 1314 | "invalid extent length, have %llu expect aligned to %u", |
| 1315 | key->offset, fs_info->sectorsize); |
| 1316 | return -EUCLEAN; |
| 1317 | } |
Josef Bacik | 0ebb6bbb | 2021-03-12 15:25:26 -0500 | [diff] [blame] | 1318 | if (unlikely(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) { |
| 1319 | extent_err(leaf, slot, |
| 1320 | "invalid extent flag, data has full backref set"); |
| 1321 | return -EUCLEAN; |
| 1322 | } |
Qu Wenruo | f82d1c7c | 2019-08-09 09:24:22 +0800 | [diff] [blame] | 1323 | } |
| 1324 | ptr = (unsigned long)(ei + 1);
| 1325 | |
| 1326 | /* Check the special case of btrfs_tree_block_info */ |
| 1327 | if (is_tree_block && key->type != BTRFS_METADATA_ITEM_KEY) { |
| 1328 | struct btrfs_tree_block_info *info; |
| 1329 | |
| 1330 | info = (struct btrfs_tree_block_info *)ptr; |
David Sterba | c7c01a4 | 2020-11-04 16:12:45 +0100 | [diff] [blame] | 1331 | if (unlikely(btrfs_tree_block_level(leaf, info) >= BTRFS_MAX_LEVEL)) { |
Qu Wenruo | f82d1c7c | 2019-08-09 09:24:22 +0800 | [diff] [blame] | 1332 | extent_err(leaf, slot, |
| 1333 | "invalid tree block info level, have %u expect [0, %u]", |
| 1334 | btrfs_tree_block_level(leaf, info), |
| 1335 | BTRFS_MAX_LEVEL - 1); |
| 1336 | return -EUCLEAN; |
| 1337 | } |
| 1338 | ptr = (unsigned long)(info + 1);
| 1339 | } |
| 1340 | |
| 1341 | /* Check inline refs */ |
| 1342 | while (ptr < end) { |
| 1343 | struct btrfs_extent_inline_ref *iref; |
| 1344 | struct btrfs_extent_data_ref *dref; |
| 1345 | struct btrfs_shared_data_ref *sref; |
| 1346 | u64 dref_offset; |
| 1347 | u64 inline_offset; |
| 1348 | u8 inline_type; |
| 1349 | |
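| | /*
| |  * The per-entry size depends on the ref type; assuming the definition
| |  * of btrfs_extent_inline_ref_size():
| |  *
| |  *	TREE_BLOCK_REF/SHARED_BLOCK_REF:
| |  *		sizeof(struct btrfs_extent_inline_ref)
| |  *	SHARED_DATA_REF:
| |  *		sizeof(struct btrfs_extent_inline_ref) +
| |  *		sizeof(struct btrfs_shared_data_ref)
| |  *	EXTENT_DATA_REF:
| |  *		offsetof(struct btrfs_extent_inline_ref, offset) +
| |  *		sizeof(struct btrfs_extent_data_ref)
| |  */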
David Sterba | c7c01a4 | 2020-11-04 16:12:45 +0100 | [diff] [blame] | 1350 | if (unlikely(ptr + sizeof(*iref) > end)) { |
Qu Wenruo | f82d1c7c | 2019-08-09 09:24:22 +0800 | [diff] [blame] | 1351 | extent_err(leaf, slot, |
| 1352 | "inline ref item overflows extent item, ptr %lu iref size %zu end %lu", |
| 1353 | ptr, sizeof(*iref), end); |
| 1354 | return -EUCLEAN; |
| 1355 | } |
| 1356 | iref = (struct btrfs_extent_inline_ref *)ptr; |
| 1357 | inline_type = btrfs_extent_inline_ref_type(leaf, iref); |
| 1358 | inline_offset = btrfs_extent_inline_ref_offset(leaf, iref); |
David Sterba | c7c01a4 | 2020-11-04 16:12:45 +0100 | [diff] [blame] | 1359 | if (unlikely(ptr + btrfs_extent_inline_ref_size(inline_type) > end)) { |
Qu Wenruo | f82d1c7c | 2019-08-09 09:24:22 +0800 | [diff] [blame] | 1360 | extent_err(leaf, slot, |
| 1361 | "inline ref item overflows extent item, ptr %lu iref size %u end %lu", |
| 1362 | ptr, inline_type, end); |
| 1363 | return -EUCLEAN; |
| 1364 | } |
| 1365 | |
| 1366 | switch (inline_type) { |
| 1367 | /* inline_offset is subvolid of the owner, no need to check */ |
| 1368 | case BTRFS_TREE_BLOCK_REF_KEY: |
| 1369 | inline_refs++; |
| 1370 | break; |
| 1371 | /* Contains parent bytenr */ |
| 1372 | case BTRFS_SHARED_BLOCK_REF_KEY: |
David Sterba | c7c01a4 | 2020-11-04 16:12:45 +0100 | [diff] [blame] | 1373 | if (unlikely(!IS_ALIGNED(inline_offset, |
| 1374 | fs_info->sectorsize))) { |
Qu Wenruo | f82d1c7c | 2019-08-09 09:24:22 +0800 | [diff] [blame] | 1375 | extent_err(leaf, slot, |
| 1376 | "invalid tree parent bytenr, have %llu expect aligned to %u", |
| 1377 | inline_offset, fs_info->sectorsize); |
| 1378 | return -EUCLEAN; |
| 1379 | } |
| 1380 | inline_refs++; |
| 1381 | break; |
| 1382 | /* |
| 1383 | * Contains owner subvolid, owner key objectid, adjusted offset. |
| 1384 |  * The only obvious corruption that can happen is in that offset.
| 1385 | */ |
| 1386 | case BTRFS_EXTENT_DATA_REF_KEY: |
| 1387 | dref = (struct btrfs_extent_data_ref *)(&iref->offset); |
| 1388 | dref_offset = btrfs_extent_data_ref_offset(leaf, dref); |
David Sterba | c7c01a4 | 2020-11-04 16:12:45 +0100 | [diff] [blame] | 1389 | if (unlikely(!IS_ALIGNED(dref_offset, |
| 1390 | fs_info->sectorsize))) { |
Qu Wenruo | f82d1c7c | 2019-08-09 09:24:22 +0800 | [diff] [blame] | 1391 | extent_err(leaf, slot, |
| 1392 | "invalid data ref offset, have %llu expect aligned to %u", |
| 1393 | dref_offset, fs_info->sectorsize); |
| 1394 | return -EUCLEAN; |
| 1395 | } |
| 1396 | inline_refs += btrfs_extent_data_ref_count(leaf, dref); |
| 1397 | break; |
| 1398 | /* Contains parent bytenr and ref count */ |
| 1399 | case BTRFS_SHARED_DATA_REF_KEY: |
| 1400 | sref = (struct btrfs_shared_data_ref *)(iref + 1); |
David Sterba | c7c01a4 | 2020-11-04 16:12:45 +0100 | [diff] [blame] | 1401 | if (unlikely(!IS_ALIGNED(inline_offset, |
| 1402 | fs_info->sectorsize))) { |
Qu Wenruo | f82d1c7c | 2019-08-09 09:24:22 +0800 | [diff] [blame] | 1403 | extent_err(leaf, slot, |
| 1404 | "invalid data parent bytenr, have %llu expect aligned to %u", |
| 1405 | inline_offset, fs_info->sectorsize); |
| 1406 | return -EUCLEAN; |
| 1407 | } |
| 1408 | inline_refs += btrfs_shared_data_ref_count(leaf, sref); |
| 1409 | break; |
| 1410 | default: |
| 1411 | extent_err(leaf, slot, "unknown inline ref type: %u", |
| 1412 | inline_type); |
| 1413 | return -EUCLEAN; |
| 1414 | } |
| 1415 | ptr += btrfs_extent_inline_ref_size(inline_type); |
| 1416 | } |
| 1417 | /* No padding is allowed */ |
David Sterba | c7c01a4 | 2020-11-04 16:12:45 +0100 | [diff] [blame] | 1418 | if (unlikely(ptr != end)) { |
Qu Wenruo | f82d1c7c | 2019-08-09 09:24:22 +0800 | [diff] [blame] | 1419 | extent_err(leaf, slot, |
| 1420 | "invalid extent item size, padding bytes found"); |
| 1421 | return -EUCLEAN; |
| 1422 | } |
| 1423 | |
| 1424 | /* Finally, check the inline refs against total refs */ |
David Sterba | c7c01a4 | 2020-11-04 16:12:45 +0100 | [diff] [blame] | 1425 | if (unlikely(inline_refs > total_refs)) { |
Qu Wenruo | f82d1c7c | 2019-08-09 09:24:22 +0800 | [diff] [blame] | 1426 | extent_err(leaf, slot, |
| 1427 | "invalid extent refs, have %llu expect >= inline %llu", |
| 1428 | total_refs, inline_refs); |
| 1429 | return -EUCLEAN; |
| 1430 | } |
| 1431 | return 0; |
| 1432 | } |
| 1433 | |
Qu Wenruo | e2406a6 | 2019-08-09 09:24:23 +0800 | [diff] [blame] | 1434 | static int check_simple_keyed_refs(struct extent_buffer *leaf, |
| 1435 | struct btrfs_key *key, int slot) |
| 1436 | { |
| 1437 | u32 expect_item_size = 0; |
| 1438 | |
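| | /*
| |  * Keyed block refs carry no payload at all (the key encodes everything),
| |  * while a keyed SHARED_DATA_REF stores only a ref count, i.e.
| |  * sizeof(struct btrfs_shared_data_ref) == 4 on disk.
| |  */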
| 1439 | if (key->type == BTRFS_SHARED_DATA_REF_KEY) |
| 1440 | expect_item_size = sizeof(struct btrfs_shared_data_ref); |
| 1441 | |
Josef Bacik | 3212fa1 | 2021-10-21 14:58:35 -0400 | [diff] [blame] | 1442 | if (unlikely(btrfs_item_size(leaf, slot) != expect_item_size)) { |
Qu Wenruo | e2406a6 | 2019-08-09 09:24:23 +0800 | [diff] [blame] | 1443 | generic_err(leaf, slot, |
| 1444 | "invalid item size, have %u expect %u for key type %u", |
Josef Bacik | 3212fa1 | 2021-10-21 14:58:35 -0400 | [diff] [blame] | 1445 | btrfs_item_size(leaf, slot), |
Qu Wenruo | e2406a6 | 2019-08-09 09:24:23 +0800 | [diff] [blame] | 1446 | expect_item_size, key->type); |
| 1447 | return -EUCLEAN; |
| 1448 | } |
David Sterba | c7c01a4 | 2020-11-04 16:12:45 +0100 | [diff] [blame] | 1449 | if (unlikely(!IS_ALIGNED(key->objectid, leaf->fs_info->sectorsize))) { |
Qu Wenruo | e2406a6 | 2019-08-09 09:24:23 +0800 | [diff] [blame] | 1450 | generic_err(leaf, slot, |
| 1451 | "invalid key objectid for shared block ref, have %llu expect aligned to %u", |
| 1452 | key->objectid, leaf->fs_info->sectorsize); |
| 1453 | return -EUCLEAN; |
| 1454 | } |
David Sterba | c7c01a4 | 2020-11-04 16:12:45 +0100 | [diff] [blame] | 1455 | if (unlikely(key->type != BTRFS_TREE_BLOCK_REF_KEY && |
| 1456 | !IS_ALIGNED(key->offset, leaf->fs_info->sectorsize))) { |
Qu Wenruo | e2406a6 | 2019-08-09 09:24:23 +0800 | [diff] [blame] | 1457 | extent_err(leaf, slot, |
| 1458 | "invalid tree parent bytenr, have %llu expect aligned to %u", |
| 1459 | key->offset, leaf->fs_info->sectorsize); |
| 1460 | return -EUCLEAN; |
| 1461 | } |
| 1462 | return 0; |
| 1463 | } |
| 1464 | |
Qu Wenruo | 0785a9a | 2019-08-09 09:24:24 +0800 | [diff] [blame] | 1465 | static int check_extent_data_ref(struct extent_buffer *leaf, |
| 1466 | struct btrfs_key *key, int slot) |
| 1467 | { |
| 1468 | struct btrfs_extent_data_ref *dref; |
| 1469 | unsigned long ptr = btrfs_item_ptr_offset(leaf, slot); |
Josef Bacik | 3212fa1 | 2021-10-21 14:58:35 -0400 | [diff] [blame] | 1470 | const unsigned long end = ptr + btrfs_item_size(leaf, slot); |
Qu Wenruo | 0785a9a | 2019-08-09 09:24:24 +0800 | [diff] [blame] | 1471 | |
Josef Bacik | 3212fa1 | 2021-10-21 14:58:35 -0400 | [diff] [blame] | 1472 | if (unlikely(btrfs_item_size(leaf, slot) % sizeof(*dref) != 0)) { |
Qu Wenruo | 0785a9a | 2019-08-09 09:24:24 +0800 | [diff] [blame] | 1473 | generic_err(leaf, slot, |
| 1474 | "invalid item size, have %u expect aligned to %zu for key type %u", |
Josef Bacik | 3212fa1 | 2021-10-21 14:58:35 -0400 | [diff] [blame] | 1475 | btrfs_item_size(leaf, slot), |
Qu Wenruo | 0785a9a | 2019-08-09 09:24:24 +0800 | [diff] [blame] | 1476 | sizeof(*dref), key->type); |
David Sterba | 6d06b0a | 2020-11-16 19:53:52 +0100 | [diff] [blame] | 1477 | return -EUCLEAN; |
Qu Wenruo | 0785a9a | 2019-08-09 09:24:24 +0800 | [diff] [blame] | 1478 | } |
David Sterba | c7c01a4 | 2020-11-04 16:12:45 +0100 | [diff] [blame] | 1479 | if (unlikely(!IS_ALIGNED(key->objectid, leaf->fs_info->sectorsize))) { |
Qu Wenruo | 0785a9a | 2019-08-09 09:24:24 +0800 | [diff] [blame] | 1480 | generic_err(leaf, slot, |
| 1481 | "invalid key objectid for shared block ref, have %llu expect aligned to %u", |
| 1482 | key->objectid, leaf->fs_info->sectorsize); |
| 1483 | return -EUCLEAN; |
| 1484 | } |
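| | /*
| |  * The item body is a plain array of struct btrfs_extent_data_ref, hence
| |  * the multiple-of-sizeof(*dref) size check above; each entry only has an
| |  * alignment constraint on its offset field, checked below.
| |  */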
| 1485 | for (; ptr < end; ptr += sizeof(*dref)) { |
Qu Wenruo | 0785a9a | 2019-08-09 09:24:24 +0800 | [diff] [blame] | 1486 | u64 offset; |
Qu Wenruo | 0785a9a | 2019-08-09 09:24:24 +0800 | [diff] [blame] | 1487 | |
Josef Bacik | 1119a72 | 2021-02-16 15:43:22 -0500 | [diff] [blame] | 1488 | /* |
| 1489 | * We cannot check the extent_data_ref hash due to possible |
| 1490 |  * overflow from the leaf caused by hash collisions.
| 1491 | */ |
Qu Wenruo | 0785a9a | 2019-08-09 09:24:24 +0800 | [diff] [blame] | 1492 | dref = (struct btrfs_extent_data_ref *)ptr; |
Qu Wenruo | 0785a9a | 2019-08-09 09:24:24 +0800 | [diff] [blame] | 1493 | offset = btrfs_extent_data_ref_offset(leaf, dref); |
David Sterba | c7c01a4 | 2020-11-04 16:12:45 +0100 | [diff] [blame] | 1494 | if (unlikely(!IS_ALIGNED(offset, leaf->fs_info->sectorsize))) { |
Qu Wenruo | 0785a9a | 2019-08-09 09:24:24 +0800 | [diff] [blame] | 1495 | extent_err(leaf, slot, |
| 1496 | "invalid extent data backref offset, have %llu expect aligned to %u", |
| 1497 | offset, leaf->fs_info->sectorsize); |
David Sterba | 6d06b0a | 2020-11-16 19:53:52 +0100 | [diff] [blame] | 1498 | return -EUCLEAN; |
Qu Wenruo | 0785a9a | 2019-08-09 09:24:24 +0800 | [diff] [blame] | 1499 | } |
| 1500 | } |
| 1501 | return 0; |
| 1502 | } |
| 1503 | |
Qu Wenruo | c3053eb | 2019-12-09 18:54:32 +0800 | [diff] [blame] | 1504 | #define inode_ref_err(eb, slot, fmt, args...) \ |
| 1505 | inode_item_err(eb, slot, fmt, ##args) |
Qu Wenruo | 71bf92a9 | 2019-08-26 15:40:39 +0800 | [diff] [blame] | 1506 | static int check_inode_ref(struct extent_buffer *leaf, |
| 1507 | struct btrfs_key *key, struct btrfs_key *prev_key, |
| 1508 | int slot) |
| 1509 | { |
| 1510 | struct btrfs_inode_ref *iref; |
| 1511 | unsigned long ptr; |
| 1512 | unsigned long end; |
| 1513 | |
David Sterba | c7c01a4 | 2020-11-04 16:12:45 +0100 | [diff] [blame] | 1514 | if (unlikely(!check_prev_ino(leaf, key, slot, prev_key))) |
Qu Wenruo | 80d7fd1 | 2019-10-04 17:31:32 +0800 | [diff] [blame] | 1515 | return -EUCLEAN; |
Qu Wenruo | 71bf92a9 | 2019-08-26 15:40:39 +0800 | [diff] [blame] | 1516 | /* namelen can't be 0, so item_size == sizeof() is also invalid */ |
Josef Bacik | 3212fa1 | 2021-10-21 14:58:35 -0400 | [diff] [blame] | 1517 | if (unlikely(btrfs_item_size(leaf, slot) <= sizeof(*iref))) { |
Qu Wenruo | c3053eb | 2019-12-09 18:54:32 +0800 | [diff] [blame] | 1518 | inode_ref_err(leaf, slot, |
Qu Wenruo | 71bf92a9 | 2019-08-26 15:40:39 +0800 | [diff] [blame] | 1519 | "invalid item size, have %u expect (%zu, %u)", |
Josef Bacik | 3212fa1 | 2021-10-21 14:58:35 -0400 | [diff] [blame] | 1520 | btrfs_item_size(leaf, slot), |
Qu Wenruo | 71bf92a9 | 2019-08-26 15:40:39 +0800 | [diff] [blame] | 1521 | sizeof(*iref), BTRFS_LEAF_DATA_SIZE(leaf->fs_info)); |
| 1522 | return -EUCLEAN; |
| 1523 | } |
| 1524 | |
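| | /*
| |  * The item body is a packed sequence of entries, each one a struct
| |  * btrfs_inode_ref (index + name_len) immediately followed by the name
| |  * bytes. E.g. for two hard links "foo" and "bar" (index values are
| |  * hypothetical):
| |  *
| |  *	[inode_ref index=2 name_len=3]["foo"]
| |  *	[inode_ref index=3 name_len=3]["bar"]
| |  */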
| 1525 | ptr = btrfs_item_ptr_offset(leaf, slot); |
Josef Bacik | 3212fa1 | 2021-10-21 14:58:35 -0400 | [diff] [blame] | 1526 | end = ptr + btrfs_item_size(leaf, slot); |
Qu Wenruo | 71bf92a9 | 2019-08-26 15:40:39 +0800 | [diff] [blame] | 1527 | while (ptr < end) { |
| 1528 | u16 namelen; |
| 1529 | |
David Sterba | c7c01a4 | 2020-11-04 16:12:45 +0100 | [diff] [blame] | 1530 | if (unlikely(ptr + sizeof(*iref) > end)) {
Qu Wenruo | c3053eb | 2019-12-09 18:54:32 +0800 | [diff] [blame] | 1531 | inode_ref_err(leaf, slot,
Qu Wenruo | 71bf92a9 | 2019-08-26 15:40:39 +0800 | [diff] [blame] | 1532 | "inode ref overflow, ptr %lu end %lu inode_ref_size %zu",
| 1533 | ptr, end, sizeof(*iref));
| 1534 | return -EUCLEAN; |
| 1535 | } |
| 1536 | |
| 1537 | iref = (struct btrfs_inode_ref *)ptr; |
| 1538 | namelen = btrfs_inode_ref_name_len(leaf, iref); |
David Sterba | c7c01a4 | 2020-11-04 16:12:45 +0100 | [diff] [blame] | 1539 | if (unlikely(ptr + sizeof(*iref) + namelen > end)) { |
Qu Wenruo | c3053eb | 2019-12-09 18:54:32 +0800 | [diff] [blame] | 1540 | inode_ref_err(leaf, slot, |
Qu Wenruo | 71bf92a9 | 2019-08-26 15:40:39 +0800 | [diff] [blame] | 1541 | "inode ref overflow, ptr %lu end %lu namelen %u", |
| 1542 | ptr, end, namelen); |
| 1543 | return -EUCLEAN; |
| 1544 | } |
| 1545 | |
| 1546 | /* |
| 1547 | * NOTE: In theory we should record all found index numbers |
| 1548 |  * to find any duplicated indexes, but that would be too time
| 1549 |  * consuming for inodes with many hard links.
| 1550 | */ |
| 1551 | ptr += sizeof(*iref) + namelen; |
| 1552 | } |
| 1553 | return 0; |
| 1554 | } |
| 1555 | |
Qu Wenruo | 82fc28f | 2019-03-20 13:16:42 +0800 | [diff] [blame] | 1556 | /* |
Qu Wenruo | 557ea5d | 2017-10-09 01:51:02 +0000 | [diff] [blame] | 1557 | * Common point to switch the item-specific validation. |
| 1558 | */ |
David Sterba | 0076bc89 | 2019-03-20 16:22:00 +0100 | [diff] [blame] | 1559 | static int check_leaf_item(struct extent_buffer *leaf, |
Filipe Manana | 4e9845e | 2019-05-06 16:44:12 +0100 | [diff] [blame] | 1560 | struct btrfs_key *key, int slot, |
| 1561 | struct btrfs_key *prev_key) |
Qu Wenruo | 557ea5d | 2017-10-09 01:51:02 +0000 | [diff] [blame] | 1562 | { |
| 1563 | int ret = 0; |
Qu Wenruo | 075cb3c | 2019-03-20 13:42:33 +0800 | [diff] [blame] | 1564 | struct btrfs_chunk *chunk; |
Qu Wenruo | 557ea5d | 2017-10-09 01:51:02 +0000 | [diff] [blame] | 1565 | |
| 1566 | switch (key->type) { |
| 1567 | case BTRFS_EXTENT_DATA_KEY: |
Filipe Manana | 4e9845e | 2019-05-06 16:44:12 +0100 | [diff] [blame] | 1568 | ret = check_extent_data_item(leaf, key, slot, prev_key); |
Qu Wenruo | 557ea5d | 2017-10-09 01:51:02 +0000 | [diff] [blame] | 1569 | break; |
| 1570 | case BTRFS_EXTENT_CSUM_KEY: |
Filipe Manana | ad1d8c4 | 2019-12-02 11:01:03 +0000 | [diff] [blame] | 1571 | ret = check_csum_item(leaf, key, slot, prev_key); |
Qu Wenruo | 557ea5d | 2017-10-09 01:51:02 +0000 | [diff] [blame] | 1572 | break; |
Qu Wenruo | ad7b0368 | 2017-11-08 08:54:25 +0800 | [diff] [blame] | 1573 | case BTRFS_DIR_ITEM_KEY: |
| 1574 | case BTRFS_DIR_INDEX_KEY: |
| 1575 | case BTRFS_XATTR_ITEM_KEY: |
Qu Wenruo | c18679e | 2019-08-26 15:40:38 +0800 | [diff] [blame] | 1576 | ret = check_dir_item(leaf, key, prev_key, slot); |
Qu Wenruo | ad7b0368 | 2017-11-08 08:54:25 +0800 | [diff] [blame] | 1577 | break; |
Qu Wenruo | 71bf92a9 | 2019-08-26 15:40:39 +0800 | [diff] [blame] | 1578 | case BTRFS_INODE_REF_KEY: |
| 1579 | ret = check_inode_ref(leaf, key, prev_key, slot); |
| 1580 | break; |
Qu Wenruo | fce466e | 2018-07-03 17:10:05 +0800 | [diff] [blame] | 1581 | case BTRFS_BLOCK_GROUP_ITEM_KEY: |
David Sterba | af60ce2 | 2019-03-20 16:19:31 +0100 | [diff] [blame] | 1582 | ret = check_block_group_item(leaf, key, slot); |
Qu Wenruo | fce466e | 2018-07-03 17:10:05 +0800 | [diff] [blame] | 1583 | break; |
Qu Wenruo | 075cb3c | 2019-03-20 13:42:33 +0800 | [diff] [blame] | 1584 | case BTRFS_CHUNK_ITEM_KEY: |
| 1585 | chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk); |
Qu Wenruo | f6d2a5c | 2019-12-17 18:58:20 +0800 | [diff] [blame] | 1586 | ret = check_leaf_chunk_item(leaf, chunk, key, slot); |
Qu Wenruo | 075cb3c | 2019-03-20 13:42:33 +0800 | [diff] [blame] | 1587 | break; |
Qu Wenruo | ab4ba2e | 2019-03-08 14:20:03 +0800 | [diff] [blame] | 1588 | case BTRFS_DEV_ITEM_KEY: |
David Sterba | 412a231 | 2019-03-20 16:22:58 +0100 | [diff] [blame] | 1589 | ret = check_dev_item(leaf, key, slot); |
Qu Wenruo | ab4ba2e | 2019-03-08 14:20:03 +0800 | [diff] [blame] | 1590 | break; |
Qu Wenruo | 496245c | 2019-03-13 14:31:35 +0800 | [diff] [blame] | 1591 | case BTRFS_INODE_ITEM_KEY: |
David Sterba | 39e57f4 | 2019-03-20 16:22:58 +0100 | [diff] [blame] | 1592 | ret = check_inode_item(leaf, key, slot); |
Qu Wenruo | 496245c | 2019-03-13 14:31:35 +0800 | [diff] [blame] | 1593 | break; |
Qu Wenruo | 259ee77 | 2019-07-16 17:00:34 +0800 | [diff] [blame] | 1594 | case BTRFS_ROOT_ITEM_KEY: |
| 1595 | ret = check_root_item(leaf, key, slot); |
| 1596 | break; |
Qu Wenruo | f82d1c7c | 2019-08-09 09:24:22 +0800 | [diff] [blame] | 1597 | case BTRFS_EXTENT_ITEM_KEY: |
| 1598 | case BTRFS_METADATA_ITEM_KEY: |
| 1599 | ret = check_extent_item(leaf, key, slot); |
| 1600 | break; |
Qu Wenruo | e2406a6 | 2019-08-09 09:24:23 +0800 | [diff] [blame] | 1601 | case BTRFS_TREE_BLOCK_REF_KEY: |
| 1602 | case BTRFS_SHARED_DATA_REF_KEY: |
| 1603 | case BTRFS_SHARED_BLOCK_REF_KEY: |
| 1604 | ret = check_simple_keyed_refs(leaf, key, slot); |
| 1605 | break; |
Qu Wenruo | 0785a9a | 2019-08-09 09:24:24 +0800 | [diff] [blame] | 1606 | case BTRFS_EXTENT_DATA_REF_KEY: |
| 1607 | ret = check_extent_data_ref(leaf, key, slot); |
| 1608 | break; |
Qu Wenruo | 557ea5d | 2017-10-09 01:51:02 +0000 | [diff] [blame] | 1609 | } |
| 1610 | return ret; |
| 1611 | } |
| 1612 | |
David Sterba | e2ccd36 | 2019-03-20 16:22:58 +0100 | [diff] [blame] | 1613 | static int check_leaf(struct extent_buffer *leaf, bool check_item_data) |
Qu Wenruo | 557ea5d | 2017-10-09 01:51:02 +0000 | [diff] [blame] | 1614 | { |
David Sterba | e2ccd36 | 2019-03-20 16:22:58 +0100 | [diff] [blame] | 1615 | struct btrfs_fs_info *fs_info = leaf->fs_info; |
Qu Wenruo | 557ea5d | 2017-10-09 01:51:02 +0000 | [diff] [blame] | 1616 | /* No valid key type is 0, so all keys should be larger than this key */
| 1617 | struct btrfs_key prev_key = {0, 0, 0}; |
| 1618 | struct btrfs_key key; |
| 1619 | u32 nritems = btrfs_header_nritems(leaf); |
| 1620 | int slot; |
| 1621 | |
David Sterba | c7c01a4 | 2020-11-04 16:12:45 +0100 | [diff] [blame] | 1622 | if (unlikely(btrfs_header_level(leaf) != 0)) { |
David Sterba | 86a6be3 | 2019-03-20 15:31:28 +0100 | [diff] [blame] | 1623 | generic_err(leaf, 0, |
Qu Wenruo | f556faa | 2018-09-28 07:59:34 +0800 | [diff] [blame] | 1624 | "invalid level for leaf, have %d expect 0", |
| 1625 | btrfs_header_level(leaf)); |
| 1626 | return -EUCLEAN; |
| 1627 | } |
| 1628 | |
Qu Wenruo | 557ea5d | 2017-10-09 01:51:02 +0000 | [diff] [blame] | 1629 | /* |
| 1630 |  * Extent buffers from a relocation tree have an owner field that
| 1631 |  * corresponds to the subvolume tree they are based on. So just from an
| 1632 |  * extent buffer alone we cannot find out the id of the corresponding
| 1633 |  * subvolume tree, and thus cannot figure out whether the extent buffer
| 1634 |  * corresponds to the root of the relocation tree or not. So skip this
| 1635 |  * check for relocation trees.
| 1636 | */ |
| 1637 | if (nritems == 0 && !btrfs_header_flag(leaf, BTRFS_HEADER_FLAG_RELOC)) { |
Qu Wenruo | ba480dd | 2018-07-03 17:10:06 +0800 | [diff] [blame] | 1638 | u64 owner = btrfs_header_owner(leaf); |
Qu Wenruo | 557ea5d | 2017-10-09 01:51:02 +0000 | [diff] [blame] | 1639 | |
Qu Wenruo | ba480dd | 2018-07-03 17:10:06 +0800 | [diff] [blame] | 1640 | /* These trees must never be empty */ |
David Sterba | c7c01a4 | 2020-11-04 16:12:45 +0100 | [diff] [blame] | 1641 | if (unlikely(owner == BTRFS_ROOT_TREE_OBJECTID || |
| 1642 | owner == BTRFS_CHUNK_TREE_OBJECTID || |
| 1643 | owner == BTRFS_EXTENT_TREE_OBJECTID || |
| 1644 | owner == BTRFS_DEV_TREE_OBJECTID || |
| 1645 | owner == BTRFS_FS_TREE_OBJECTID || |
| 1646 | owner == BTRFS_DATA_RELOC_TREE_OBJECTID)) { |
David Sterba | 86a6be3 | 2019-03-20 15:31:28 +0100 | [diff] [blame] | 1647 | generic_err(leaf, 0, |
Qu Wenruo | ba480dd | 2018-07-03 17:10:06 +0800 | [diff] [blame] | 1648 | "invalid root, root %llu must never be empty", |
| 1649 | owner); |
| 1650 | return -EUCLEAN; |
| 1651 | } |
Qu Wenruo | 62fdaa5 | 2019-08-22 10:14:15 +0800 | [diff] [blame] | 1652 | /* Unknown tree */ |
David Sterba | c7c01a4 | 2020-11-04 16:12:45 +0100 | [diff] [blame] | 1653 | if (unlikely(owner == 0)) { |
Qu Wenruo | 62fdaa5 | 2019-08-22 10:14:15 +0800 | [diff] [blame] | 1654 | generic_err(leaf, 0, |
| 1655 | "invalid owner, root 0 is not defined"); |
| 1656 | return -EUCLEAN; |
| 1657 | } |
Qu Wenruo | 557ea5d | 2017-10-09 01:51:02 +0000 | [diff] [blame] | 1658 | return 0; |
| 1659 | } |
| 1660 | |
David Sterba | c7c01a4 | 2020-11-04 16:12:45 +0100 | [diff] [blame] | 1661 | if (unlikely(nritems == 0)) |
Qu Wenruo | 557ea5d | 2017-10-09 01:51:02 +0000 | [diff] [blame] | 1662 | return 0; |
| 1663 | |
| 1664 | /* |
| 1665 | * Check the following things to make sure this is a good leaf, and |
| 1666 | * leaf users won't need to bother with similar sanity checks: |
| 1667 | * |
| 1668 | * 1) key ordering |
| 1669 | * 2) item offset and size |
| 1670 | * No overlap, no hole, all inside the leaf. |
| 1671 | * 3) item content |
| 1672 | * If possible, do comprehensive sanity check. |
| 1673 | * NOTE: All checks must only rely on the item data itself. |
| 1674 | */ |
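| | /*
| |  * Rough leaf layout for reference:
| |  *
| |  *	[header][item 0][item 1]...  ->  free space  <-  ...[data 1][data 0]
| |  *
| |  * The item array grows towards the end of the leaf while item data grows
| |  * from the end towards the front, so the data of slot N must end exactly
| |  * where the data of slot N - 1 starts.
| |  */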
| 1675 | for (slot = 0; slot < nritems; slot++) { |
| 1676 | u32 item_end_expected; |
| 1677 | int ret; |
| 1678 | |
| 1679 | btrfs_item_key_to_cpu(leaf, &key, slot); |
| 1680 | |
| 1681 | /* Make sure the keys are in the right order */ |
David Sterba | c7c01a4 | 2020-11-04 16:12:45 +0100 | [diff] [blame] | 1682 | if (unlikely(btrfs_comp_cpu_keys(&prev_key, &key) >= 0)) { |
David Sterba | 86a6be3 | 2019-03-20 15:31:28 +0100 | [diff] [blame] | 1683 | generic_err(leaf, slot, |
Qu Wenruo | 478d01b | 2017-10-09 01:51:04 +0000 | [diff] [blame] | 1684 | "bad key order, prev (%llu %u %llu) current (%llu %u %llu)", |
| 1685 | prev_key.objectid, prev_key.type, |
| 1686 | prev_key.offset, key.objectid, key.type, |
| 1687 | key.offset); |
Qu Wenruo | 557ea5d | 2017-10-09 01:51:02 +0000 | [diff] [blame] | 1688 | return -EUCLEAN; |
| 1689 | } |
| 1690 | |
| 1691 | /* |
| 1692 | * Make sure the offset and ends are right, remember that the |
| 1693 | * item data starts at the end of the leaf and grows towards the |
| 1694 | * front. |
| 1695 | */ |
| 1696 | if (slot == 0) |
| 1697 | item_end_expected = BTRFS_LEAF_DATA_SIZE(fs_info); |
| 1698 | else |
Josef Bacik | 3212fa1 | 2021-10-21 14:58:35 -0400 | [diff] [blame] | 1699 | item_end_expected = btrfs_item_offset(leaf, |
Qu Wenruo | 557ea5d | 2017-10-09 01:51:02 +0000 | [diff] [blame] | 1700 | slot - 1); |
Josef Bacik | dc2e724 | 2021-10-21 14:58:37 -0400 | [diff] [blame] | 1701 | if (unlikely(btrfs_item_data_end(leaf, slot) != item_end_expected)) { |
David Sterba | 86a6be3 | 2019-03-20 15:31:28 +0100 | [diff] [blame] | 1702 | generic_err(leaf, slot, |
Qu Wenruo | 478d01b | 2017-10-09 01:51:04 +0000 | [diff] [blame] | 1703 | "unexpected item end, have %u expect %u", |
Josef Bacik | dc2e724 | 2021-10-21 14:58:37 -0400 | [diff] [blame] | 1704 | btrfs_item_data_end(leaf, slot), |
Qu Wenruo | 478d01b | 2017-10-09 01:51:04 +0000 | [diff] [blame] | 1705 | item_end_expected); |
Qu Wenruo | 557ea5d | 2017-10-09 01:51:02 +0000 | [diff] [blame] | 1706 | return -EUCLEAN; |
| 1707 | } |
| 1708 | |
| 1709 | /* |
| 1710 | * Check to make sure that we don't point outside of the leaf, |
| 1711 |  * just in case all the items are consistent with each other, but
| 1712 | * all point outside of the leaf. |
| 1713 | */ |
Josef Bacik | dc2e724 | 2021-10-21 14:58:37 -0400 | [diff] [blame] | 1714 | if (unlikely(btrfs_item_data_end(leaf, slot) > |
David Sterba | c7c01a4 | 2020-11-04 16:12:45 +0100 | [diff] [blame] | 1715 | BTRFS_LEAF_DATA_SIZE(fs_info))) { |
David Sterba | 86a6be3 | 2019-03-20 15:31:28 +0100 | [diff] [blame] | 1716 | generic_err(leaf, slot, |
Qu Wenruo | 478d01b | 2017-10-09 01:51:04 +0000 | [diff] [blame] | 1717 | "slot end outside of leaf, have %u expect range [0, %u]", |
Josef Bacik | dc2e724 | 2021-10-21 14:58:37 -0400 | [diff] [blame] | 1718 | btrfs_item_data_end(leaf, slot), |
Qu Wenruo | 478d01b | 2017-10-09 01:51:04 +0000 | [diff] [blame] | 1719 | BTRFS_LEAF_DATA_SIZE(fs_info)); |
Qu Wenruo | 557ea5d | 2017-10-09 01:51:02 +0000 | [diff] [blame] | 1720 | return -EUCLEAN; |
| 1721 | } |
| 1722 | |
| 1723 | /* Also check if the item pointer overlaps with the btrfs item. */
David Sterba | c7c01a4 | 2020-11-04 16:12:45 +0100 | [diff] [blame] | 1724 | if (unlikely(btrfs_item_ptr_offset(leaf, slot) < |
| 1725 | btrfs_item_nr_offset(slot) + sizeof(struct btrfs_item))) { |
David Sterba | 86a6be3 | 2019-03-20 15:31:28 +0100 | [diff] [blame] | 1726 | generic_err(leaf, slot, |
Qu Wenruo | 478d01b | 2017-10-09 01:51:04 +0000 | [diff] [blame] | 1727 | "slot overlaps with its data, item end %lu data start %lu", |
| 1728 | btrfs_item_nr_offset(slot) + |
| 1729 | sizeof(struct btrfs_item), |
| 1730 | btrfs_item_ptr_offset(leaf, slot)); |
Qu Wenruo | 557ea5d | 2017-10-09 01:51:02 +0000 | [diff] [blame] | 1731 | return -EUCLEAN; |
| 1732 | } |
| 1733 | |
Qu Wenruo | 69fc6cb | 2017-11-08 08:54:24 +0800 | [diff] [blame] | 1734 | if (check_item_data) { |
| 1735 | /* |
| 1736 | * Check if the item size and content meet other |
| 1737 | * criteria |
| 1738 | */ |
Filipe Manana | 4e9845e | 2019-05-06 16:44:12 +0100 | [diff] [blame] | 1739 | ret = check_leaf_item(leaf, &key, slot, &prev_key); |
David Sterba | c7c01a4 | 2020-11-04 16:12:45 +0100 | [diff] [blame] | 1740 | if (unlikely(ret < 0)) |
Qu Wenruo | 69fc6cb | 2017-11-08 08:54:24 +0800 | [diff] [blame] | 1741 | return ret; |
| 1742 | } |
Qu Wenruo | 557ea5d | 2017-10-09 01:51:02 +0000 | [diff] [blame] | 1743 | |
| 1744 | prev_key.objectid = key.objectid; |
| 1745 | prev_key.type = key.type; |
| 1746 | prev_key.offset = key.offset; |
| 1747 | } |
| 1748 | |
| 1749 | return 0; |
| 1750 | } |
| 1751 | |
David Sterba | 1c4360e | 2019-03-20 16:23:29 +0100 | [diff] [blame] | 1752 | int btrfs_check_leaf_full(struct extent_buffer *leaf) |
Qu Wenruo | 69fc6cb | 2017-11-08 08:54:24 +0800 | [diff] [blame] | 1753 | { |
David Sterba | e2ccd36 | 2019-03-20 16:22:58 +0100 | [diff] [blame] | 1754 | return check_leaf(leaf, true); |
Qu Wenruo | 69fc6cb | 2017-11-08 08:54:24 +0800 | [diff] [blame] | 1755 | } |
Qu Wenruo | 02529d7 | 2019-04-24 15:22:53 +0800 | [diff] [blame] | 1756 | ALLOW_ERROR_INJECTION(btrfs_check_leaf_full, ERRNO); |
Qu Wenruo | 69fc6cb | 2017-11-08 08:54:24 +0800 | [diff] [blame] | 1757 | |
David Sterba | cfdaad5 | 2019-03-20 16:24:18 +0100 | [diff] [blame] | 1758 | int btrfs_check_leaf_relaxed(struct extent_buffer *leaf) |
Qu Wenruo | 69fc6cb | 2017-11-08 08:54:24 +0800 | [diff] [blame] | 1759 | { |
David Sterba | e2ccd36 | 2019-03-20 16:22:58 +0100 | [diff] [blame] | 1760 | return check_leaf(leaf, false); |
Qu Wenruo | 69fc6cb | 2017-11-08 08:54:24 +0800 | [diff] [blame] | 1761 | } |
| 1762 | |
David Sterba | 813fd1d | 2019-03-20 16:25:00 +0100 | [diff] [blame] | 1763 | int btrfs_check_node(struct extent_buffer *node) |
Qu Wenruo | 557ea5d | 2017-10-09 01:51:02 +0000 | [diff] [blame] | 1764 | { |
David Sterba | 813fd1d | 2019-03-20 16:25:00 +0100 | [diff] [blame] | 1765 | struct btrfs_fs_info *fs_info = node->fs_info; |
Qu Wenruo | 557ea5d | 2017-10-09 01:51:02 +0000 | [diff] [blame] | 1766 | unsigned long nr = btrfs_header_nritems(node); |
| 1767 | struct btrfs_key key, next_key; |
| 1768 | int slot; |
Qu Wenruo | f556faa | 2018-09-28 07:59:34 +0800 | [diff] [blame] | 1769 | int level = btrfs_header_level(node); |
Qu Wenruo | 557ea5d | 2017-10-09 01:51:02 +0000 | [diff] [blame] | 1770 | u64 bytenr; |
| 1771 | int ret = 0; |
| 1772 | |
David Sterba | c7c01a4 | 2020-11-04 16:12:45 +0100 | [diff] [blame] | 1773 | if (unlikely(level <= 0 || level >= BTRFS_MAX_LEVEL)) { |
David Sterba | 86a6be3 | 2019-03-20 15:31:28 +0100 | [diff] [blame] | 1774 | generic_err(node, 0, |
Qu Wenruo | f556faa | 2018-09-28 07:59:34 +0800 | [diff] [blame] | 1775 | "invalid level for node, have %d expect [1, %d]", |
| 1776 | level, BTRFS_MAX_LEVEL - 1); |
| 1777 | return -EUCLEAN; |
| 1778 | } |
David Sterba | c7c01a4 | 2020-11-04 16:12:45 +0100 | [diff] [blame] | 1779 | if (unlikely(nr == 0 || nr > BTRFS_NODEPTRS_PER_BLOCK(fs_info))) { |
Qu Wenruo | 2f65954 | 2018-01-25 14:56:18 +0800 | [diff] [blame] | 1780 | btrfs_crit(fs_info, |
Qu Wenruo | bba4f29 | 2017-10-09 01:51:03 +0000 | [diff] [blame] | 1781 | "corrupt node: root=%llu block=%llu, nritems too %s, have %lu expect range [1,%u]", |
Qu Wenruo | 2f65954 | 2018-01-25 14:56:18 +0800 | [diff] [blame] | 1782 | btrfs_header_owner(node), node->start, |
Qu Wenruo | bba4f29 | 2017-10-09 01:51:03 +0000 | [diff] [blame] | 1783 | nr == 0 ? "small" : "large", nr, |
Qu Wenruo | 2f65954 | 2018-01-25 14:56:18 +0800 | [diff] [blame] | 1784 | BTRFS_NODEPTRS_PER_BLOCK(fs_info)); |
Qu Wenruo | bba4f29 | 2017-10-09 01:51:03 +0000 | [diff] [blame] | 1785 | return -EUCLEAN; |
Qu Wenruo | 557ea5d | 2017-10-09 01:51:02 +0000 | [diff] [blame] | 1786 | } |
| 1787 | |
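| | /*
| |  * For reference, BTRFS_NODEPTRS_PER_BLOCK() is assumed to be
| |  * (nodesize - sizeof(struct btrfs_header)) / sizeof(struct btrfs_key_ptr),
| |  * e.g. (16384 - 101) / 33 = 493 pointers for the default 16K nodesize.
| |  */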
| 1788 | for (slot = 0; slot < nr - 1; slot++) { |
| 1789 | bytenr = btrfs_node_blockptr(node, slot); |
| 1790 | btrfs_node_key_to_cpu(node, &key, slot); |
| 1791 | btrfs_node_key_to_cpu(node, &next_key, slot + 1); |
| 1792 | |
David Sterba | c7c01a4 | 2020-11-04 16:12:45 +0100 | [diff] [blame] | 1793 | if (unlikely(!bytenr)) { |
David Sterba | 86a6be3 | 2019-03-20 15:31:28 +0100 | [diff] [blame] | 1794 | generic_err(node, slot, |
Qu Wenruo | bba4f29 | 2017-10-09 01:51:03 +0000 | [diff] [blame] | 1795 | "invalid NULL node pointer"); |
| 1796 | ret = -EUCLEAN; |
| 1797 | goto out; |
| 1798 | } |
David Sterba | c7c01a4 | 2020-11-04 16:12:45 +0100 | [diff] [blame] | 1799 | if (unlikely(!IS_ALIGNED(bytenr, fs_info->sectorsize))) { |
David Sterba | 86a6be3 | 2019-03-20 15:31:28 +0100 | [diff] [blame] | 1800 | generic_err(node, slot, |
Qu Wenruo | bba4f29 | 2017-10-09 01:51:03 +0000 | [diff] [blame] | 1801 | "unaligned pointer, have %llu should be aligned to %u", |
Qu Wenruo | 2f65954 | 2018-01-25 14:56:18 +0800 | [diff] [blame] | 1802 | bytenr, fs_info->sectorsize); |
Qu Wenruo | bba4f29 | 2017-10-09 01:51:03 +0000 | [diff] [blame] | 1803 | ret = -EUCLEAN; |
Qu Wenruo | 557ea5d | 2017-10-09 01:51:02 +0000 | [diff] [blame] | 1804 | goto out; |
| 1805 | } |
| 1806 | |
David Sterba | c7c01a4 | 2020-11-04 16:12:45 +0100 | [diff] [blame] | 1807 | if (unlikely(btrfs_comp_cpu_keys(&key, &next_key) >= 0)) { |
David Sterba | 86a6be3 | 2019-03-20 15:31:28 +0100 | [diff] [blame] | 1808 | generic_err(node, slot, |
Qu Wenruo | bba4f29 | 2017-10-09 01:51:03 +0000 | [diff] [blame] | 1809 | "bad key order, current (%llu %u %llu) next (%llu %u %llu)", |
| 1810 | key.objectid, key.type, key.offset, |
| 1811 | next_key.objectid, next_key.type, |
| 1812 | next_key.offset); |
| 1813 | ret = -EUCLEAN; |
Qu Wenruo | 557ea5d | 2017-10-09 01:51:02 +0000 | [diff] [blame] | 1814 | goto out; |
| 1815 | } |
| 1816 | } |
| 1817 | out: |
| 1818 | return ret; |
| 1819 | } |
Qu Wenruo | 02529d7 | 2019-04-24 15:22:53 +0800 | [diff] [blame] | 1820 | ALLOW_ERROR_INJECTION(btrfs_check_node, ERRNO); |