// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) Qu Wenruo 2017.  All rights reserved.
 */

/*
 * The module is used to catch unexpected/corrupted tree block data.
 * Such behavior can be caused either by a fuzzed image or bugs.
 *
 * The objective is to do leaf/node validation checks when tree block is read
 * from disk, and check *every* possible member, so other code won't
 * need to check them again.
 *
 * Due to the potential and unwanted damage, every checker needs to be
 * carefully reviewed, otherwise it may reject valid images and prevent
 * them from being mounted.
 */

#include <linux/types.h>
#include <linux/stddef.h>
#include <linux/error-injection.h>
#include "ctree.h"
#include "tree-checker.h"
#include "disk-io.h"
#include "compression.h"
#include "volumes.h"

/*
 * Error messages should follow the following format:
 * corrupt <type>: <identifier>, <reason>[, <bad_value>]
 *
 * @type:	leaf or node
 * @identifier:	the necessary info to locate the leaf/node.
 *		It's recommended to decode key.objectid/offset if it's
 *		meaningful.
 * @reason:	describe the error
 * @bad_value:	optional, it's recommended to output the bad value and its
 *		expected value (range).
 *
 * Since comma is used to separate the components, only space is allowed
 * inside each component.
 */
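
/*
 * For example, a message following this format could look like the line
 * below (the values here are made up purely for illustration):
 *
 *   corrupt leaf: root=5 block=29360128 slot=3, invalid key objectid
 */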

/*
 * Append generic "corrupt leaf/node root=%llu block=%llu slot=%d: " to @fmt.
 * Allows callers to customize the output.
 */
__printf(3, 4)
__cold
static void generic_err(const struct extent_buffer *eb, int slot,
			const char *fmt, ...)
{
	const struct btrfs_fs_info *fs_info = eb->fs_info;
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

	btrfs_crit(fs_info,
		"corrupt %s: root=%llu block=%llu slot=%d, %pV",
		btrfs_header_level(eb) == 0 ? "leaf" : "node",
		btrfs_header_owner(eb), btrfs_header_bytenr(eb), slot, &vaf);
	va_end(args);
}

/*
 * Customized reporter for extent data item, since its key objectid and
 * offset have their own meaning.
 */
__printf(3, 4)
__cold
static void file_extent_err(const struct extent_buffer *eb, int slot,
			    const char *fmt, ...)
{
	const struct btrfs_fs_info *fs_info = eb->fs_info;
	struct btrfs_key key;
	struct va_format vaf;
	va_list args;

	btrfs_item_key_to_cpu(eb, &key, slot);
	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

	btrfs_crit(fs_info,
	"corrupt %s: root=%llu block=%llu slot=%d ino=%llu file_offset=%llu, %pV",
		btrfs_header_level(eb) == 0 ? "leaf" : "node",
		btrfs_header_owner(eb), btrfs_header_bytenr(eb), slot,
		key.objectid, key.offset, &vaf);
	va_end(args);
}

/*
 * Return 0 if the btrfs_file_extent_##name is aligned to @alignment,
 * else return 1.
 */
#define CHECK_FE_ALIGNED(leaf, slot, fi, name, alignment)		      \
({									      \
	if (!IS_ALIGNED(btrfs_file_extent_##name((leaf), (fi)), (alignment))) \
		file_extent_err((leaf), (slot),				      \
	"invalid %s for file extent, have %llu, should be aligned to %u",    \
			(#name), btrfs_file_extent_##name((leaf), (fi)),      \
			(alignment));					      \
	(!IS_ALIGNED(btrfs_file_extent_##name((leaf), (fi)), (alignment)));  \
})

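/*
 * Return the first byte past the range covered by the file extent item.
 * Inline extents store no on-disk length, so their end is key->offset +
 * ram_bytes rounded up to sectorsize; regular/prealloc extents simply end
 * at key->offset + num_bytes.
 */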
static u64 file_extent_end(struct extent_buffer *leaf,
			   struct btrfs_key *key,
			   struct btrfs_file_extent_item *extent)
{
	u64 end;
	u64 len;

	if (btrfs_file_extent_type(leaf, extent) == BTRFS_FILE_EXTENT_INLINE) {
		len = btrfs_file_extent_ram_bytes(leaf, extent);
		end = ALIGN(key->offset + len, leaf->fs_info->sectorsize);
	} else {
		len = btrfs_file_extent_num_bytes(leaf, extent);
		end = key->offset + len;
	}
	return end;
}

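/*
 * Validate one EXTENT_DATA item: offset/size alignment, extent type,
 * compression/encryption fields, inline extent constraints, and that it
 * does not overlap the previous file extent in the same leaf.
 */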
static int check_extent_data_item(struct extent_buffer *leaf,
				  struct btrfs_key *key, int slot,
				  struct btrfs_key *prev_key)
{
	struct btrfs_fs_info *fs_info = leaf->fs_info;
	struct btrfs_file_extent_item *fi;
	u32 sectorsize = fs_info->sectorsize;
	u32 item_size = btrfs_item_size_nr(leaf, slot);
	u64 extent_end;

	if (!IS_ALIGNED(key->offset, sectorsize)) {
		file_extent_err(leaf, slot,
"unaligned file_offset for file extent, have %llu should be aligned to %u",
			key->offset, sectorsize);
		return -EUCLEAN;
	}

	/*
	 * Previous key must have the same key->objectid (ino).
	 * It can be XATTR_ITEM, INODE_ITEM or just another EXTENT_DATA.
	 * But if objectids mismatch, it means we have a missing
	 * INODE_ITEM.
	 */
	if (slot > 0 && is_fstree(btrfs_header_owner(leaf)) &&
	    prev_key->objectid != key->objectid) {
		file_extent_err(leaf, slot,
	"invalid previous key objectid, have %llu expect %llu",
			prev_key->objectid, key->objectid);
		return -EUCLEAN;
	}

	fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);

	if (btrfs_file_extent_type(leaf, fi) > BTRFS_FILE_EXTENT_TYPES) {
		file_extent_err(leaf, slot,
	"invalid type for file extent, have %u expect range [0, %u]",
			btrfs_file_extent_type(leaf, fi),
			BTRFS_FILE_EXTENT_TYPES);
		return -EUCLEAN;
	}

	/*
	 * Support for new compression/encryption must introduce incompat flag,
	 * and must be caught in open_ctree().
	 */
	if (btrfs_file_extent_compression(leaf, fi) > BTRFS_COMPRESS_TYPES) {
		file_extent_err(leaf, slot,
	"invalid compression for file extent, have %u expect range [0, %u]",
			btrfs_file_extent_compression(leaf, fi),
			BTRFS_COMPRESS_TYPES);
		return -EUCLEAN;
	}
	if (btrfs_file_extent_encryption(leaf, fi)) {
		file_extent_err(leaf, slot,
	"invalid encryption for file extent, have %u expect 0",
			btrfs_file_extent_encryption(leaf, fi));
		return -EUCLEAN;
	}
	if (btrfs_file_extent_type(leaf, fi) == BTRFS_FILE_EXTENT_INLINE) {
		/* Inline extent must have 0 as key offset */
		if (key->offset) {
			file_extent_err(leaf, slot,
	"invalid file_offset for inline file extent, have %llu expect 0",
				key->offset);
			return -EUCLEAN;
		}

		/* Compressed inline extent has no on-disk size, skip it */
		if (btrfs_file_extent_compression(leaf, fi) !=
		    BTRFS_COMPRESS_NONE)
			return 0;

		/* Uncompressed inline extent size must match item size */
		if (item_size != BTRFS_FILE_EXTENT_INLINE_DATA_START +
		    btrfs_file_extent_ram_bytes(leaf, fi)) {
			file_extent_err(leaf, slot,
	"invalid ram_bytes for uncompressed inline extent, have %u expect %llu",
				item_size, BTRFS_FILE_EXTENT_INLINE_DATA_START +
				btrfs_file_extent_ram_bytes(leaf, fi));
			return -EUCLEAN;
		}
		return 0;
	}

	/* Regular or preallocated extent has fixed item size */
	if (item_size != sizeof(*fi)) {
		file_extent_err(leaf, slot,
	"invalid item size for reg/prealloc file extent, have %u expect %zu",
			item_size, sizeof(*fi));
		return -EUCLEAN;
	}
	if (CHECK_FE_ALIGNED(leaf, slot, fi, ram_bytes, sectorsize) ||
	    CHECK_FE_ALIGNED(leaf, slot, fi, disk_bytenr, sectorsize) ||
	    CHECK_FE_ALIGNED(leaf, slot, fi, disk_num_bytes, sectorsize) ||
	    CHECK_FE_ALIGNED(leaf, slot, fi, offset, sectorsize) ||
	    CHECK_FE_ALIGNED(leaf, slot, fi, num_bytes, sectorsize))
		return -EUCLEAN;

	/* Catch extent end overflow */
	if (check_add_overflow(btrfs_file_extent_num_bytes(leaf, fi),
			       key->offset, &extent_end)) {
		file_extent_err(leaf, slot,
	"extent end overflow, have file offset %llu extent num bytes %llu",
				key->offset,
				btrfs_file_extent_num_bytes(leaf, fi));
		return -EUCLEAN;
	}

	/*
	 * Check that no two consecutive file extent items, in the same leaf,
	 * present ranges that overlap each other.
	 */
	if (slot > 0 &&
	    prev_key->objectid == key->objectid &&
	    prev_key->type == BTRFS_EXTENT_DATA_KEY) {
		struct btrfs_file_extent_item *prev_fi;
		u64 prev_end;

		prev_fi = btrfs_item_ptr(leaf, slot - 1,
					 struct btrfs_file_extent_item);
		prev_end = file_extent_end(leaf, prev_key, prev_fi);
		if (prev_end > key->offset) {
			file_extent_err(leaf, slot - 1,
"file extent end range (%llu) goes beyond start offset (%llu) of the next file extent",
				prev_end, key->offset);
			return -EUCLEAN;
		}
	}

	return 0;
}

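/*
 * CSUM items carry an array of checksums: key->offset is the logical
 * address of the first data sector covered, and each csumsize-sized entry
 * covers one sector.
 */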
static int check_csum_item(struct extent_buffer *leaf, struct btrfs_key *key,
			   int slot)
{
	struct btrfs_fs_info *fs_info = leaf->fs_info;
	u32 sectorsize = fs_info->sectorsize;
	u32 csumsize = btrfs_super_csum_size(fs_info->super_copy);

	if (key->objectid != BTRFS_EXTENT_CSUM_OBJECTID) {
		generic_err(leaf, slot,
		"invalid key objectid for csum item, have %llu expect %llu",
			key->objectid, BTRFS_EXTENT_CSUM_OBJECTID);
		return -EUCLEAN;
	}
	if (!IS_ALIGNED(key->offset, sectorsize)) {
		generic_err(leaf, slot,
	"unaligned key offset for csum item, have %llu should be aligned to %u",
			key->offset, sectorsize);
		return -EUCLEAN;
	}
	if (!IS_ALIGNED(btrfs_item_size_nr(leaf, slot), csumsize)) {
		generic_err(leaf, slot,
	"unaligned item size for csum item, have %u should be aligned to %u",
			btrfs_item_size_nr(leaf, slot), csumsize);
		return -EUCLEAN;
	}
	return 0;
}

/*
 * Customized reporter for dir_item; the only important new info is
 * key->objectid, which represents the inode number.
 */
__printf(3, 4)
__cold
static void dir_item_err(const struct extent_buffer *eb, int slot,
			 const char *fmt, ...)
{
	const struct btrfs_fs_info *fs_info = eb->fs_info;
	struct btrfs_key key;
	struct va_format vaf;
	va_list args;

	btrfs_item_key_to_cpu(eb, &key, slot);
	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

	btrfs_crit(fs_info,
	"corrupt %s: root=%llu block=%llu slot=%d ino=%llu, %pV",
		btrfs_header_level(eb) == 0 ? "leaf" : "node",
		btrfs_header_owner(eb), btrfs_header_bytenr(eb), slot,
		key.objectid, &vaf);
	va_end(args);
}

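/*
 * A DIR_ITEM/DIR_INDEX/XATTR_ITEM item is a packed sequence of
 * (struct btrfs_dir_item header, name[, data]) entries; walk each entry
 * and bound-check it against the item size.
 */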
static int check_dir_item(struct extent_buffer *leaf,
			  struct btrfs_key *key, struct btrfs_key *prev_key,
			  int slot)
{
	struct btrfs_fs_info *fs_info = leaf->fs_info;
	struct btrfs_dir_item *di;
	u32 item_size = btrfs_item_size_nr(leaf, slot);
	u32 cur = 0;

	/* Same check as in check_extent_data_item() */
	if (slot > 0 && is_fstree(btrfs_header_owner(leaf)) &&
	    prev_key->objectid != key->objectid) {
		dir_item_err(leaf, slot,
	"invalid previous key objectid, have %llu expect %llu",
			prev_key->objectid, key->objectid);
		return -EUCLEAN;
	}
	di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item);
	while (cur < item_size) {
		u32 name_len;
		u32 data_len;
		u32 max_name_len;
		u32 total_size;
		u32 name_hash;
		u8 dir_type;

		/* header itself should not cross item boundary */
		if (cur + sizeof(*di) > item_size) {
			dir_item_err(leaf, slot,
		"dir item header crosses item boundary, have %zu boundary %u",
				cur + sizeof(*di), item_size);
			return -EUCLEAN;
		}

		/* dir type check */
		dir_type = btrfs_dir_type(leaf, di);
		if (dir_type >= BTRFS_FT_MAX) {
			dir_item_err(leaf, slot,
			"invalid dir item type, have %u expect [0, %u)",
				dir_type, BTRFS_FT_MAX);
			return -EUCLEAN;
		}

		if (key->type == BTRFS_XATTR_ITEM_KEY &&
		    dir_type != BTRFS_FT_XATTR) {
			dir_item_err(leaf, slot,
		"invalid dir item type for XATTR key, have %u expect %u",
				dir_type, BTRFS_FT_XATTR);
			return -EUCLEAN;
		}
		if (dir_type == BTRFS_FT_XATTR &&
		    key->type != BTRFS_XATTR_ITEM_KEY) {
			dir_item_err(leaf, slot,
			"xattr dir type found for non-XATTR key");
			return -EUCLEAN;
		}
		if (dir_type == BTRFS_FT_XATTR)
			max_name_len = XATTR_NAME_MAX;
		else
			max_name_len = BTRFS_NAME_LEN;

		/* Name/data length check */
		name_len = btrfs_dir_name_len(leaf, di);
		data_len = btrfs_dir_data_len(leaf, di);
		if (name_len > max_name_len) {
			dir_item_err(leaf, slot,
			"dir item name len too long, have %u max %u",
				name_len, max_name_len);
			return -EUCLEAN;
		}
		if (name_len + data_len > BTRFS_MAX_XATTR_SIZE(fs_info)) {
			dir_item_err(leaf, slot,
			"dir item name and data len too long, have %u max %u",
				name_len + data_len,
				BTRFS_MAX_XATTR_SIZE(fs_info));
			return -EUCLEAN;
		}

		if (data_len && dir_type != BTRFS_FT_XATTR) {
			dir_item_err(leaf, slot,
			"dir item with invalid data len, have %u expect 0",
				data_len);
			return -EUCLEAN;
		}

		total_size = sizeof(*di) + name_len + data_len;

		/* header and name/data should not cross item boundary */
		if (cur + total_size > item_size) {
			dir_item_err(leaf, slot,
		"dir item data crosses item boundary, have %u boundary %u",
				cur + total_size, item_size);
			return -EUCLEAN;
		}

		/*
		 * Special check for XATTR/DIR_ITEM, as key->offset is name
		 * hash, should match its name
		 */
		if (key->type == BTRFS_DIR_ITEM_KEY ||
		    key->type == BTRFS_XATTR_ITEM_KEY) {
			char namebuf[max(BTRFS_NAME_LEN, XATTR_NAME_MAX)];

			read_extent_buffer(leaf, namebuf,
					(unsigned long)(di + 1), name_len);
			name_hash = btrfs_name_hash(namebuf, name_len);
			if (key->offset != name_hash) {
				dir_item_err(leaf, slot,
		"name hash mismatch with key, have 0x%016x expect 0x%016llx",
					name_hash, key->offset);
				return -EUCLEAN;
			}
		}
		cur += total_size;
		di = (struct btrfs_dir_item *)((void *)di + total_size);
	}
	return 0;
}

__printf(3, 4)
__cold
static void block_group_err(const struct extent_buffer *eb, int slot,
			    const char *fmt, ...)
{
	const struct btrfs_fs_info *fs_info = eb->fs_info;
	struct btrfs_key key;
	struct va_format vaf;
	va_list args;

	btrfs_item_key_to_cpu(eb, &key, slot);
	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

	btrfs_crit(fs_info,
	"corrupt %s: root=%llu block=%llu slot=%d bg_start=%llu bg_len=%llu, %pV",
		btrfs_header_level(eb) == 0 ? "leaf" : "node",
		btrfs_header_owner(eb), btrfs_header_bytenr(eb), slot,
		key.objectid, key.offset, &vaf);
	va_end(args);
}

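/*
 * The block group item is read into a stack copy first (the source may
 * span non-contiguous pages of the extent buffer); then its size, chunk
 * objectid, used bytes, and type/profile flags are validated.
 */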
static int check_block_group_item(struct extent_buffer *leaf,
				  struct btrfs_key *key, int slot)
{
	struct btrfs_block_group_item bgi;
	u32 item_size = btrfs_item_size_nr(leaf, slot);
	u64 flags;
	u64 type;

	/*
	 * Here we don't really care about alignment since extent allocator can
	 * handle it.  We care more about the size.
	 */
	if (key->offset == 0) {
		block_group_err(leaf, slot,
				"invalid block group size 0");
		return -EUCLEAN;
	}

	if (item_size != sizeof(bgi)) {
		block_group_err(leaf, slot,
			"invalid item size, have %u expect %zu",
				item_size, sizeof(bgi));
		return -EUCLEAN;
	}

	read_extent_buffer(leaf, &bgi, btrfs_item_ptr_offset(leaf, slot),
			   sizeof(bgi));
	if (btrfs_block_group_chunk_objectid(&bgi) !=
	    BTRFS_FIRST_CHUNK_TREE_OBJECTID) {
		block_group_err(leaf, slot,
		"invalid block group chunk objectid, have %llu expect %llu",
			btrfs_block_group_chunk_objectid(&bgi),
			BTRFS_FIRST_CHUNK_TREE_OBJECTID);
		return -EUCLEAN;
	}

	if (btrfs_block_group_used(&bgi) > key->offset) {
		block_group_err(leaf, slot,
			"invalid block group used, have %llu expect [0, %llu)",
				btrfs_block_group_used(&bgi), key->offset);
		return -EUCLEAN;
	}

	flags = btrfs_block_group_flags(&bgi);
	if (hweight64(flags & BTRFS_BLOCK_GROUP_PROFILE_MASK) > 1) {
		block_group_err(leaf, slot,
"invalid profile flags, have 0x%llx (%lu bits set) expect no more than 1 bit set",
			flags & BTRFS_BLOCK_GROUP_PROFILE_MASK,
			hweight64(flags & BTRFS_BLOCK_GROUP_PROFILE_MASK));
		return -EUCLEAN;
	}

	type = flags & BTRFS_BLOCK_GROUP_TYPE_MASK;
	if (type != BTRFS_BLOCK_GROUP_DATA &&
	    type != BTRFS_BLOCK_GROUP_METADATA &&
	    type != BTRFS_BLOCK_GROUP_SYSTEM &&
	    type != (BTRFS_BLOCK_GROUP_METADATA |
		     BTRFS_BLOCK_GROUP_DATA)) {
		block_group_err(leaf, slot,
"invalid type, have 0x%llx (%lu bits set) expect either 0x%llx, 0x%llx, 0x%llx or 0x%llx",
			type, hweight64(type),
			BTRFS_BLOCK_GROUP_DATA, BTRFS_BLOCK_GROUP_METADATA,
			BTRFS_BLOCK_GROUP_SYSTEM,
			BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA);
		return -EUCLEAN;
	}
	return 0;
}

__printf(4, 5)
__cold
static void chunk_err(const struct extent_buffer *leaf,
		      const struct btrfs_chunk *chunk, u64 logical,
		      const char *fmt, ...)
{
	const struct btrfs_fs_info *fs_info = leaf->fs_info;
	bool is_sb;
	struct va_format vaf;
	va_list args;
	int i;
	int slot = -1;

	/* Only the superblock eb is able to have such a small offset */
	is_sb = (leaf->start == BTRFS_SUPER_INFO_OFFSET);

	if (!is_sb) {
		/*
		 * Get the slot number by iterating through all slots, this
		 * would provide better readability.
		 */
		for (i = 0; i < btrfs_header_nritems(leaf); i++) {
			if (btrfs_item_ptr_offset(leaf, i) ==
					(unsigned long)chunk) {
				slot = i;
				break;
			}
		}
	}
	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;

	if (is_sb)
		btrfs_crit(fs_info,
		"corrupt superblock syschunk array: chunk_start=%llu, %pV",
			   logical, &vaf);
	else
		btrfs_crit(fs_info,
	"corrupt leaf: root=%llu block=%llu slot=%d chunk_start=%llu, %pV",
			   BTRFS_CHUNK_TREE_OBJECTID, leaf->start, slot,
			   logical, &vaf);
	va_end(args);
}

/*
 * The common chunk check which could also work on super block sys chunk array.
 *
 * Return -EUCLEAN if anything is corrupted.
 * Return 0 if everything is OK.
 */
int btrfs_check_chunk_valid(struct extent_buffer *leaf,
			    struct btrfs_chunk *chunk, u64 logical)
{
	struct btrfs_fs_info *fs_info = leaf->fs_info;
	u64 length;
	u64 stripe_len;
	u16 num_stripes;
	u16 sub_stripes;
	u64 type;
	u64 features;
	bool mixed = false;

	length = btrfs_chunk_length(leaf, chunk);
	stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
	num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
	sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
	type = btrfs_chunk_type(leaf, chunk);

	if (!num_stripes) {
		chunk_err(leaf, chunk, logical,
			  "invalid chunk num_stripes, have %u", num_stripes);
		return -EUCLEAN;
	}
	if (!IS_ALIGNED(logical, fs_info->sectorsize)) {
		chunk_err(leaf, chunk, logical,
		"invalid chunk logical, have %llu should be aligned to %u",
			  logical, fs_info->sectorsize);
		return -EUCLEAN;
	}
	if (btrfs_chunk_sector_size(leaf, chunk) != fs_info->sectorsize) {
		chunk_err(leaf, chunk, logical,
			  "invalid chunk sectorsize, have %u expect %u",
			  btrfs_chunk_sector_size(leaf, chunk),
			  fs_info->sectorsize);
		return -EUCLEAN;
	}
	if (!length || !IS_ALIGNED(length, fs_info->sectorsize)) {
		chunk_err(leaf, chunk, logical,
			  "invalid chunk length, have %llu", length);
		return -EUCLEAN;
	}
	if (!is_power_of_2(stripe_len) || stripe_len != BTRFS_STRIPE_LEN) {
		chunk_err(leaf, chunk, logical,
			  "invalid chunk stripe length: %llu",
			  stripe_len);
		return -EUCLEAN;
	}
	if (~(BTRFS_BLOCK_GROUP_TYPE_MASK | BTRFS_BLOCK_GROUP_PROFILE_MASK) &
	    type) {
		chunk_err(leaf, chunk, logical,
			  "unrecognized chunk type: 0x%llx",
			  ~(BTRFS_BLOCK_GROUP_TYPE_MASK |
			    BTRFS_BLOCK_GROUP_PROFILE_MASK) &
			  btrfs_chunk_type(leaf, chunk));
		return -EUCLEAN;
	}

	if (!is_power_of_2(type & BTRFS_BLOCK_GROUP_PROFILE_MASK) &&
	    (type & BTRFS_BLOCK_GROUP_PROFILE_MASK) != 0) {
		chunk_err(leaf, chunk, logical,
		"invalid chunk profile flag: 0x%llx, expect 0 or 1 bit set",
			  type & BTRFS_BLOCK_GROUP_PROFILE_MASK);
		return -EUCLEAN;
	}
	if ((type & BTRFS_BLOCK_GROUP_TYPE_MASK) == 0) {
		chunk_err(leaf, chunk, logical,
	"missing chunk type flag, have 0x%llx one bit must be set in 0x%llx",
			  type, BTRFS_BLOCK_GROUP_TYPE_MASK);
		return -EUCLEAN;
	}

	if ((type & BTRFS_BLOCK_GROUP_SYSTEM) &&
	    (type & (BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA))) {
		chunk_err(leaf, chunk, logical,
			  "system chunk with data or metadata type: 0x%llx",
			  type);
		return -EUCLEAN;
	}

	features = btrfs_super_incompat_flags(fs_info->super_copy);
	if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
		mixed = true;

	if (!mixed) {
		if ((type & BTRFS_BLOCK_GROUP_METADATA) &&
		    (type & BTRFS_BLOCK_GROUP_DATA)) {
			chunk_err(leaf, chunk, logical,
			"mixed chunk type in non-mixed mode: 0x%llx", type);
			return -EUCLEAN;
		}
	}

	if ((type & BTRFS_BLOCK_GROUP_RAID10 && sub_stripes != 2) ||
	    (type & BTRFS_BLOCK_GROUP_RAID1 && num_stripes != 2) ||
	    (type & BTRFS_BLOCK_GROUP_RAID5 && num_stripes < 2) ||
	    (type & BTRFS_BLOCK_GROUP_RAID6 && num_stripes < 3) ||
	    (type & BTRFS_BLOCK_GROUP_DUP && num_stripes != 2) ||
	    ((type & BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0 && num_stripes != 1)) {
		chunk_err(leaf, chunk, logical,
			"invalid num_stripes:sub_stripes %u:%u for profile %llu",
			num_stripes, sub_stripes,
			type & BTRFS_BLOCK_GROUP_PROFILE_MASK);
		return -EUCLEAN;
	}

	return 0;
}

__printf(3, 4)
__cold
static void dev_item_err(const struct extent_buffer *eb, int slot,
			 const char *fmt, ...)
{
	struct btrfs_key key;
	struct va_format vaf;
	va_list args;

	btrfs_item_key_to_cpu(eb, &key, slot);
	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

	btrfs_crit(eb->fs_info,
	"corrupt %s: root=%llu block=%llu slot=%d devid=%llu %pV",
		btrfs_header_level(eb) == 0 ? "leaf" : "node",
		btrfs_header_owner(eb), btrfs_header_bytenr(eb), slot,
		key.objectid, &vaf);
	va_end(args);
}

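/*
 * For a DEV_ITEM the key is (BTRFS_DEV_ITEMS_OBJECTID, DEV_ITEM, devid),
 * so the key objectid and the devid stored in the item must both match.
 */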
static int check_dev_item(struct extent_buffer *leaf,
			  struct btrfs_key *key, int slot)
{
	struct btrfs_dev_item *ditem;

	if (key->objectid != BTRFS_DEV_ITEMS_OBJECTID) {
		dev_item_err(leaf, slot,
			     "invalid objectid: has=%llu expect=%llu",
			     key->objectid, BTRFS_DEV_ITEMS_OBJECTID);
		return -EUCLEAN;
	}
	ditem = btrfs_item_ptr(leaf, slot, struct btrfs_dev_item);
	if (btrfs_device_id(leaf, ditem) != key->offset) {
		dev_item_err(leaf, slot,
			     "devid mismatch: key has=%llu item has=%llu",
			     key->offset, btrfs_device_id(leaf, ditem));
		return -EUCLEAN;
	}

	/*
	 * For device total_bytes, we don't have a reliable way to check it, as
	 * it can be 0 for device removal.  Device size check can only be done
	 * by dev extents check.
	 */
	if (btrfs_device_bytes_used(leaf, ditem) >
	    btrfs_device_total_bytes(leaf, ditem)) {
		dev_item_err(leaf, slot,
			     "invalid bytes used: have %llu expect [0, %llu]",
			     btrfs_device_bytes_used(leaf, ditem),
			     btrfs_device_total_bytes(leaf, ditem));
		return -EUCLEAN;
	}
	/*
	 * Remaining members like io_align/type/gen/dev_group aren't really
	 * utilized.  Skip them to make later usage of them easier.
	 */
	return 0;
}

/* Inode item error output has the same format as dir_item_err() */
#define inode_item_err(fs_info, eb, slot, fmt, ...)			\
	dir_item_err(eb, slot, fmt, __VA_ARGS__)

static int check_inode_item(struct extent_buffer *leaf,
			    struct btrfs_key *key, int slot)
{
	struct btrfs_fs_info *fs_info = leaf->fs_info;
	struct btrfs_inode_item *iitem;
	u64 super_gen = btrfs_super_generation(fs_info->super_copy);
	u32 valid_mask = (S_IFMT | S_ISUID | S_ISGID | S_ISVTX | 0777);
	u32 mode;

	if ((key->objectid < BTRFS_FIRST_FREE_OBJECTID ||
	     key->objectid > BTRFS_LAST_FREE_OBJECTID) &&
	    key->objectid != BTRFS_ROOT_TREE_DIR_OBJECTID &&
	    key->objectid != BTRFS_FREE_INO_OBJECTID) {
		generic_err(leaf, slot,
	"invalid key objectid: has %llu expect %llu or [%llu, %llu] or %llu",
			    key->objectid, BTRFS_ROOT_TREE_DIR_OBJECTID,
			    BTRFS_FIRST_FREE_OBJECTID,
			    BTRFS_LAST_FREE_OBJECTID,
			    BTRFS_FREE_INO_OBJECTID);
		return -EUCLEAN;
	}
	if (key->offset != 0) {
		inode_item_err(fs_info, leaf, slot,
			"invalid key offset: has %llu expect 0",
			key->offset);
		return -EUCLEAN;
	}
	iitem = btrfs_item_ptr(leaf, slot, struct btrfs_inode_item);

	/* Here we use super block generation + 1 to handle log tree */
	if (btrfs_inode_generation(leaf, iitem) > super_gen + 1) {
		inode_item_err(fs_info, leaf, slot,
			"invalid inode generation: has %llu expect (0, %llu]",
			       btrfs_inode_generation(leaf, iitem),
			       super_gen + 1);
		return -EUCLEAN;
	}
	/* Note for ROOT_TREE_DIR_ITEM, mkfs could set its transid 0 */
	if (btrfs_inode_transid(leaf, iitem) > super_gen + 1) {
		inode_item_err(fs_info, leaf, slot,
			"invalid inode transid: has %llu expect [0, %llu]",
			       btrfs_inode_transid(leaf, iitem), super_gen + 1);
		return -EUCLEAN;
	}

	/*
	 * For size and nbytes it's better not to be too strict, as for dir
	 * item its size/nbytes can easily get wrong, but doesn't affect
	 * anything in the fs. So here we skip the check.
	 */
	mode = btrfs_inode_mode(leaf, iitem);
	if (mode & ~valid_mask) {
		inode_item_err(fs_info, leaf, slot,
			       "unknown mode bit detected: 0x%x",
			       mode & ~valid_mask);
		return -EUCLEAN;
	}

	/*
	 * S_IFMT is not bit mapped so we can't completely rely on
	 * is_power_of_2, but is_power_of_2() can save us from checking
	 * FIFO/CHR/DIR/REG.  Only needs to check BLK, LNK and SOCK.
	 */
	if (!is_power_of_2(mode & S_IFMT)) {
		if (!S_ISLNK(mode) && !S_ISBLK(mode) && !S_ISSOCK(mode)) {
			inode_item_err(fs_info, leaf, slot,
			"invalid mode: has 0%o expect valid S_IF* bit(s)",
				       mode & S_IFMT);
			return -EUCLEAN;
		}
	}
	if (S_ISDIR(mode) && btrfs_inode_nlink(leaf, iitem) > 1) {
		inode_item_err(fs_info, leaf, slot,
		       "invalid nlink: has %u expect no more than 1 for dir",
			btrfs_inode_nlink(leaf, iitem));
		return -EUCLEAN;
	}
	if (btrfs_inode_flags(leaf, iitem) & ~BTRFS_INODE_FLAG_MASK) {
		inode_item_err(fs_info, leaf, slot,
			       "unknown flags detected: 0x%llx",
			       btrfs_inode_flags(leaf, iitem) &
			       ~BTRFS_INODE_FLAG_MASK);
		return -EUCLEAN;
	}
	return 0;
}

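/*
 * Sanity check a ROOT_ITEM: reject root id 0, verify the item size,
 * cross-check the generations against the superblock, and validate the
 * bytenr alignment, levels and flags.
 */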
static int check_root_item(struct extent_buffer *leaf, struct btrfs_key *key,
			   int slot)
{
	struct btrfs_fs_info *fs_info = leaf->fs_info;
	struct btrfs_root_item ri;
	const u64 valid_root_flags = BTRFS_ROOT_SUBVOL_RDONLY |
				     BTRFS_ROOT_SUBVOL_DEAD;

	/* No such tree id */
	if (key->objectid == 0) {
		generic_err(leaf, slot, "invalid root id 0");
		return -EUCLEAN;
	}

	/*
	 * Some older kernels may create ROOT_ITEM with non-zero offset, so here
	 * we only check offset for reloc tree whose key->offset must be a
	 * valid tree.
	 */
	if (key->objectid == BTRFS_TREE_RELOC_OBJECTID && key->offset == 0) {
		generic_err(leaf, slot, "invalid root id 0 for reloc tree");
		return -EUCLEAN;
	}

	if (btrfs_item_size_nr(leaf, slot) != sizeof(ri)) {
		generic_err(leaf, slot,
			    "invalid root item size, have %u expect %zu",
			    btrfs_item_size_nr(leaf, slot), sizeof(ri));
		return -EUCLEAN;
	}

	read_extent_buffer(leaf, &ri, btrfs_item_ptr_offset(leaf, slot),
			   sizeof(ri));

	/* Generation related */
	if (btrfs_root_generation(&ri) >
	    btrfs_super_generation(fs_info->super_copy) + 1) {
		generic_err(leaf, slot,
			"invalid root generation, have %llu expect (0, %llu]",
			    btrfs_root_generation(&ri),
			    btrfs_super_generation(fs_info->super_copy) + 1);
		return -EUCLEAN;
	}
	if (btrfs_root_generation_v2(&ri) >
	    btrfs_super_generation(fs_info->super_copy) + 1) {
		generic_err(leaf, slot,
		"invalid root v2 generation, have %llu expect (0, %llu]",
			    btrfs_root_generation_v2(&ri),
			    btrfs_super_generation(fs_info->super_copy) + 1);
		return -EUCLEAN;
	}
	if (btrfs_root_last_snapshot(&ri) >
	    btrfs_super_generation(fs_info->super_copy) + 1) {
		generic_err(leaf, slot,
		"invalid root last_snapshot, have %llu expect (0, %llu]",
			    btrfs_root_last_snapshot(&ri),
			    btrfs_super_generation(fs_info->super_copy) + 1);
		return -EUCLEAN;
	}

	/* Alignment and level check */
	if (!IS_ALIGNED(btrfs_root_bytenr(&ri), fs_info->sectorsize)) {
		generic_err(leaf, slot,
		"invalid root bytenr, have %llu expect to be aligned to %u",
			    btrfs_root_bytenr(&ri), fs_info->sectorsize);
		return -EUCLEAN;
	}
	if (btrfs_root_level(&ri) >= BTRFS_MAX_LEVEL) {
		generic_err(leaf, slot,
			    "invalid root level, have %u expect [0, %u]",
			    btrfs_root_level(&ri), BTRFS_MAX_LEVEL - 1);
		return -EUCLEAN;
	}
	if (ri.drop_level >= BTRFS_MAX_LEVEL) {
		generic_err(leaf, slot,
			    "invalid root level, have %u expect [0, %u]",
			    ri.drop_level, BTRFS_MAX_LEVEL - 1);
		return -EUCLEAN;
	}

	/* Flags check */
	if (btrfs_root_flags(&ri) & ~valid_root_flags) {
		generic_err(leaf, slot,
			    "invalid root flags, have 0x%llx expect mask 0x%llx",
			    btrfs_root_flags(&ri), valid_root_flags);
		return -EUCLEAN;
	}
	return 0;
}

__printf(3, 4)
__cold
static void extent_err(const struct extent_buffer *eb, int slot,
		       const char *fmt, ...)
{
	struct btrfs_key key;
	struct va_format vaf;
	va_list args;
	u64 bytenr;
	u64 len;

	btrfs_item_key_to_cpu(eb, &key, slot);
	bytenr = key.objectid;
	if (key.type == BTRFS_METADATA_ITEM_KEY ||
	    key.type == BTRFS_TREE_BLOCK_REF_KEY ||
	    key.type == BTRFS_SHARED_BLOCK_REF_KEY)
		len = eb->fs_info->nodesize;
	else
		len = key.offset;
	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

	btrfs_crit(eb->fs_info,
	"corrupt %s: block=%llu slot=%d extent bytenr=%llu len=%llu %pV",
		btrfs_header_level(eb) == 0 ? "leaf" : "node",
		eb->start, slot, bytenr, len, &vaf);
	va_end(args);
}

static int check_extent_item(struct extent_buffer *leaf,
			     struct btrfs_key *key, int slot)
{
	struct btrfs_fs_info *fs_info = leaf->fs_info;
	struct btrfs_extent_item *ei;
	bool is_tree_block = false;
	unsigned long ptr;	/* Current pointer inside inline refs */
	unsigned long end;	/* Extent item end */
	const u32 item_size = btrfs_item_size_nr(leaf, slot);
	u64 flags;
	u64 generation;
	u64 total_refs;		/* Total refs in btrfs_extent_item */
	u64 inline_refs = 0;	/* found total inline refs */

	if (key->type == BTRFS_METADATA_ITEM_KEY &&
	    !btrfs_fs_incompat(fs_info, SKINNY_METADATA)) {
		generic_err(leaf, slot,
"invalid key type, METADATA_ITEM type invalid when SKINNY_METADATA feature disabled");
		return -EUCLEAN;
	}
	/* key->objectid is the bytenr for both key types */
	if (!IS_ALIGNED(key->objectid, fs_info->sectorsize)) {
		generic_err(leaf, slot,
		"invalid key objectid, have %llu expect to be aligned to %u",
			   key->objectid, fs_info->sectorsize);
		return -EUCLEAN;
	}

	/* key->offset is tree level for METADATA_ITEM_KEY */
	if (key->type == BTRFS_METADATA_ITEM_KEY &&
	    key->offset >= BTRFS_MAX_LEVEL) {
		extent_err(leaf, slot,
			   "invalid tree level, have %llu expect [0, %u]",
			   key->offset, BTRFS_MAX_LEVEL - 1);
		return -EUCLEAN;
	}

	/*
	 * EXTENT/METADATA_ITEM consists of:
	 * 1) One btrfs_extent_item
	 *    Records the total refs, type and generation of the extent.
	 *
	 * 2) One btrfs_tree_block_info (for EXTENT_ITEM and tree backref only)
	 *    Records the first key and level of the tree block.
	 *
	 * 3) Zero or more btrfs_extent_inline_ref(s)
	 *    Each inline ref has one btrfs_extent_inline_ref, which shows:
	 *    3.1) The ref type, one of the 4
	 *         TREE_BLOCK_REF	Tree block only
	 *         SHARED_BLOCK_REF	Tree block only
	 *         EXTENT_DATA_REF	Data only
	 *         SHARED_DATA_REF	Data only
	 *    3.2) Ref type specific data
	 *         Either using btrfs_extent_inline_ref::offset, or specific
	 *         data structure.
	 */
	if (item_size < sizeof(*ei)) {
		extent_err(leaf, slot,
			   "invalid item size, have %u expect [%zu, %u)",
			   item_size, sizeof(*ei),
			   BTRFS_LEAF_DATA_SIZE(fs_info));
		return -EUCLEAN;
	}
	end = item_size + btrfs_item_ptr_offset(leaf, slot);

	/* Checks against extent_item */
	ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item);
	flags = btrfs_extent_flags(leaf, ei);
	total_refs = btrfs_extent_refs(leaf, ei);
	generation = btrfs_extent_generation(leaf, ei);
	if (generation > btrfs_super_generation(fs_info->super_copy) + 1) {
		extent_err(leaf, slot,
			   "invalid generation, have %llu expect (0, %llu]",
			   generation,
			   btrfs_super_generation(fs_info->super_copy) + 1);
		return -EUCLEAN;
	}
	if (!is_power_of_2(flags & (BTRFS_EXTENT_FLAG_DATA |
				    BTRFS_EXTENT_FLAG_TREE_BLOCK))) {
		extent_err(leaf, slot,
		"invalid extent flag, have 0x%llx expect 1 bit set in 0x%llx",
			   flags, BTRFS_EXTENT_FLAG_DATA |
			   BTRFS_EXTENT_FLAG_TREE_BLOCK);
		return -EUCLEAN;
	}
	is_tree_block = !!(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK);
	if (is_tree_block) {
		if (key->type == BTRFS_EXTENT_ITEM_KEY &&
		    key->offset != fs_info->nodesize) {
			extent_err(leaf, slot,
				   "invalid extent length, have %llu expect %u",
				   key->offset, fs_info->nodesize);
			return -EUCLEAN;
		}
	} else {
		if (key->type != BTRFS_EXTENT_ITEM_KEY) {
			extent_err(leaf, slot,
			"invalid key type, have %u expect %u for data backref",
				   key->type, BTRFS_EXTENT_ITEM_KEY);
			return -EUCLEAN;
		}
		if (!IS_ALIGNED(key->offset, fs_info->sectorsize)) {
			extent_err(leaf, slot,
			"invalid extent length, have %llu expect aligned to %u",
				   key->offset, fs_info->sectorsize);
			return -EUCLEAN;
		}
	}
	ptr = (unsigned long)(struct btrfs_extent_item *)(ei + 1);

	/* Check the special case of btrfs_tree_block_info */
	if (is_tree_block && key->type != BTRFS_METADATA_ITEM_KEY) {
		struct btrfs_tree_block_info *info;

		info = (struct btrfs_tree_block_info *)ptr;
		if (btrfs_tree_block_level(leaf, info) >= BTRFS_MAX_LEVEL) {
			extent_err(leaf, slot,
			"invalid tree block info level, have %u expect [0, %u]",
				   btrfs_tree_block_level(leaf, info),
				   BTRFS_MAX_LEVEL - 1);
			return -EUCLEAN;
		}
		ptr = (unsigned long)(struct btrfs_tree_block_info *)(info + 1);
	}

	/* Check inline refs */
	while (ptr < end) {
		struct btrfs_extent_inline_ref *iref;
		struct btrfs_extent_data_ref *dref;
		struct btrfs_shared_data_ref *sref;
		u64 dref_offset;
		u64 inline_offset;
		u8 inline_type;

		if (ptr + sizeof(*iref) > end) {
			extent_err(leaf, slot,
"inline ref item overflows extent item, ptr %lu iref size %zu end %lu",
				   ptr, sizeof(*iref), end);
			return -EUCLEAN;
		}
		iref = (struct btrfs_extent_inline_ref *)ptr;
		inline_type = btrfs_extent_inline_ref_type(leaf, iref);
		inline_offset = btrfs_extent_inline_ref_offset(leaf, iref);
		if (ptr + btrfs_extent_inline_ref_size(inline_type) > end) {
			extent_err(leaf, slot,
"inline ref item overflows extent item, ptr %lu iref size %u end %lu",
				   ptr, inline_type, end);
			return -EUCLEAN;
		}

		switch (inline_type) {
		/* inline_offset is subvolid of the owner, no need to check */
		case BTRFS_TREE_BLOCK_REF_KEY:
			inline_refs++;
			break;
		/* Contains parent bytenr */
		case BTRFS_SHARED_BLOCK_REF_KEY:
			if (!IS_ALIGNED(inline_offset, fs_info->sectorsize)) {
				extent_err(leaf, slot,
		"invalid tree parent bytenr, have %llu expect aligned to %u",
					   inline_offset, fs_info->sectorsize);
				return -EUCLEAN;
			}
			inline_refs++;
			break;
		/*
		 * Contains owner subvolid, owner key objectid, adjusted offset.
		 * The only obvious corruption can happen in that offset.
		 */
		case BTRFS_EXTENT_DATA_REF_KEY:
			dref = (struct btrfs_extent_data_ref *)(&iref->offset);
			dref_offset = btrfs_extent_data_ref_offset(leaf, dref);
			if (!IS_ALIGNED(dref_offset, fs_info->sectorsize)) {
				extent_err(leaf, slot,
		"invalid data ref offset, have %llu expect aligned to %u",
					   dref_offset, fs_info->sectorsize);
				return -EUCLEAN;
			}
			inline_refs += btrfs_extent_data_ref_count(leaf, dref);
			break;
		/* Contains parent bytenr and ref count */
		case BTRFS_SHARED_DATA_REF_KEY:
			sref = (struct btrfs_shared_data_ref *)(iref + 1);
			if (!IS_ALIGNED(inline_offset, fs_info->sectorsize)) {
				extent_err(leaf, slot,
		"invalid data parent bytenr, have %llu expect aligned to %u",
					   inline_offset, fs_info->sectorsize);
				return -EUCLEAN;
			}
			inline_refs += btrfs_shared_data_ref_count(leaf, sref);
			break;
		default:
			extent_err(leaf, slot, "unknown inline ref type: %u",
				   inline_type);
			return -EUCLEAN;
		}
		ptr += btrfs_extent_inline_ref_size(inline_type);
	}
	/* No padding is allowed */
	if (ptr != end) {
		extent_err(leaf, slot,
			   "invalid extent item size, padding bytes found");
		return -EUCLEAN;
	}

	/* Finally, check the inline refs against total refs */
	if (inline_refs > total_refs) {
		extent_err(leaf, slot,
			"invalid extent refs, have %llu expect >= inline %llu",
			   total_refs, inline_refs);
		return -EUCLEAN;
	}
	return 0;
}

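/*
 * TREE_BLOCK_REF and SHARED_BLOCK_REF items carry no item body at all, and
 * a SHARED_DATA_REF item holds only a ref count, so the expected item sizes
 * are fixed and easy to verify.
 */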
static int check_simple_keyed_refs(struct extent_buffer *leaf,
				   struct btrfs_key *key, int slot)
{
	u32 expect_item_size = 0;

	if (key->type == BTRFS_SHARED_DATA_REF_KEY)
		expect_item_size = sizeof(struct btrfs_shared_data_ref);

	if (btrfs_item_size_nr(leaf, slot) != expect_item_size) {
		generic_err(leaf, slot,
		"invalid item size, have %u expect %u for key type %u",
			    btrfs_item_size_nr(leaf, slot),
			    expect_item_size, key->type);
		return -EUCLEAN;
	}
	if (!IS_ALIGNED(key->objectid, leaf->fs_info->sectorsize)) {
		generic_err(leaf, slot,
"invalid key objectid for shared block ref, have %llu expect aligned to %u",
			    key->objectid, leaf->fs_info->sectorsize);
		return -EUCLEAN;
	}
	if (key->type != BTRFS_TREE_BLOCK_REF_KEY &&
	    !IS_ALIGNED(key->offset, leaf->fs_info->sectorsize)) {
		extent_err(leaf, slot,
		"invalid tree parent bytenr, have %llu expect aligned to %u",
			   key->offset, leaf->fs_info->sectorsize);
		return -EUCLEAN;
	}
	return 0;
}

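/*
 * An EXTENT_DATA_REF item is an array of btrfs_extent_data_ref entries;
 * key->offset must match the hash of the (root, objectid, offset) triplet
 * of each entry.
 */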
static int check_extent_data_ref(struct extent_buffer *leaf,
				 struct btrfs_key *key, int slot)
{
	struct btrfs_extent_data_ref *dref;
	unsigned long ptr = btrfs_item_ptr_offset(leaf, slot);
	const unsigned long end = ptr + btrfs_item_size_nr(leaf, slot);

	if (btrfs_item_size_nr(leaf, slot) % sizeof(*dref) != 0) {
		generic_err(leaf, slot,
	"invalid item size, have %u expect aligned to %zu for key type %u",
			    btrfs_item_size_nr(leaf, slot),
			    sizeof(*dref), key->type);
		return -EUCLEAN;
	}
	if (!IS_ALIGNED(key->objectid, leaf->fs_info->sectorsize)) {
		generic_err(leaf, slot,
"invalid key objectid for shared block ref, have %llu expect aligned to %u",
			    key->objectid, leaf->fs_info->sectorsize);
		return -EUCLEAN;
	}
	for (; ptr < end; ptr += sizeof(*dref)) {
		u64 root_objectid;
		u64 owner;
		u64 offset;
		u64 hash;

		dref = (struct btrfs_extent_data_ref *)ptr;
		root_objectid = btrfs_extent_data_ref_root(leaf, dref);
		owner = btrfs_extent_data_ref_objectid(leaf, dref);
		offset = btrfs_extent_data_ref_offset(leaf, dref);
		hash = hash_extent_data_ref(root_objectid, owner, offset);
		if (hash != key->offset) {
			extent_err(leaf, slot,
	"invalid extent data ref hash, item has 0x%016llx key has 0x%016llx",
				   hash, key->offset);
			return -EUCLEAN;
		}
		if (!IS_ALIGNED(offset, leaf->fs_info->sectorsize)) {
			extent_err(leaf, slot,
	"invalid extent data backref offset, have %llu expect aligned to %u",
				   offset, leaf->fs_info->sectorsize);
			return -EUCLEAN;
		}
	}
	return 0;
}

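/*
 * An INODE_REF item packs one or more (btrfs_inode_ref header, name)
 * pairs back to back; walk them and make sure each stays within the item.
 */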
#define inode_ref_err(fs_info, eb, slot, fmt, args...)			\
	inode_item_err(fs_info, eb, slot, fmt, ##args)
static int check_inode_ref(struct extent_buffer *leaf,
			   struct btrfs_key *key, struct btrfs_key *prev_key,
			   int slot)
{
	struct btrfs_inode_ref *iref;
	unsigned long ptr;
	unsigned long end;

	/* namelen can't be 0, so item_size == sizeof() is also invalid */
	if (btrfs_item_size_nr(leaf, slot) <= sizeof(*iref)) {
		inode_ref_err(fs_info, leaf, slot,
			"invalid item size, have %u expect (%zu, %u)",
			btrfs_item_size_nr(leaf, slot),
			sizeof(*iref), BTRFS_LEAF_DATA_SIZE(leaf->fs_info));
		return -EUCLEAN;
	}

	ptr = btrfs_item_ptr_offset(leaf, slot);
	end = ptr + btrfs_item_size_nr(leaf, slot);
	while (ptr < end) {
		u16 namelen;

		if (ptr + sizeof(*iref) > end) {
			inode_ref_err(fs_info, leaf, slot,
			"inode ref overflow, ptr %lu end %lu inode_ref_size %zu",
				ptr, end, sizeof(*iref));
			return -EUCLEAN;
		}

		iref = (struct btrfs_inode_ref *)ptr;
		namelen = btrfs_inode_ref_name_len(leaf, iref);
		if (ptr + sizeof(*iref) + namelen > end) {
			inode_ref_err(fs_info, leaf, slot,
				"inode ref overflow, ptr %lu end %lu namelen %u",
				ptr, end, namelen);
			return -EUCLEAN;
		}

		/*
		 * NOTE: In theory we should record all found index numbers
		 * to find any duplicated indexes, but that will be too time
		 * consuming for inodes with too many hard links.
		 */
		ptr += sizeof(*iref) + namelen;
	}
	return 0;
}

/*
 * Common point to switch the item-specific validation.
 */
static int check_leaf_item(struct extent_buffer *leaf,
			   struct btrfs_key *key, int slot,
			   struct btrfs_key *prev_key)
{
	int ret = 0;
	struct btrfs_chunk *chunk;

	switch (key->type) {
	case BTRFS_EXTENT_DATA_KEY:
		ret = check_extent_data_item(leaf, key, slot, prev_key);
		break;
	case BTRFS_EXTENT_CSUM_KEY:
		ret = check_csum_item(leaf, key, slot);
		break;
	case BTRFS_DIR_ITEM_KEY:
	case BTRFS_DIR_INDEX_KEY:
	case BTRFS_XATTR_ITEM_KEY:
		ret = check_dir_item(leaf, key, prev_key, slot);
		break;
	case BTRFS_INODE_REF_KEY:
		ret = check_inode_ref(leaf, key, prev_key, slot);
		break;
	case BTRFS_BLOCK_GROUP_ITEM_KEY:
		ret = check_block_group_item(leaf, key, slot);
		break;
	case BTRFS_CHUNK_ITEM_KEY:
		chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
		ret = btrfs_check_chunk_valid(leaf, chunk, key->offset);
		break;
	case BTRFS_DEV_ITEM_KEY:
		ret = check_dev_item(leaf, key, slot);
		break;
	case BTRFS_INODE_ITEM_KEY:
		ret = check_inode_item(leaf, key, slot);
		break;
	case BTRFS_ROOT_ITEM_KEY:
		ret = check_root_item(leaf, key, slot);
		break;
	case BTRFS_EXTENT_ITEM_KEY:
	case BTRFS_METADATA_ITEM_KEY:
		ret = check_extent_item(leaf, key, slot);
		break;
	case BTRFS_TREE_BLOCK_REF_KEY:
	case BTRFS_SHARED_DATA_REF_KEY:
	case BTRFS_SHARED_BLOCK_REF_KEY:
		ret = check_simple_keyed_refs(leaf, key, slot);
		break;
	case BTRFS_EXTENT_DATA_REF_KEY:
		ret = check_extent_data_ref(leaf, key, slot);
		break;
	}
	return ret;
}

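/*
 * Validate a whole leaf: header level and owner, key ordering, item
 * offsets/sizes, and then (optionally) each item's content.
 */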
David Sterbae2ccd362019-03-20 16:22:58 +01001357static int check_leaf(struct extent_buffer *leaf, bool check_item_data)
Qu Wenruo557ea5d2017-10-09 01:51:02 +00001358{
David Sterbae2ccd362019-03-20 16:22:58 +01001359 struct btrfs_fs_info *fs_info = leaf->fs_info;
Qu Wenruo557ea5d2017-10-09 01:51:02 +00001360 /* No valid key type is 0, so all key should be larger than this key */
1361 struct btrfs_key prev_key = {0, 0, 0};
1362 struct btrfs_key key;
1363 u32 nritems = btrfs_header_nritems(leaf);
1364 int slot;
1365
Qu Wenruof556faa2018-09-28 07:59:34 +08001366 if (btrfs_header_level(leaf) != 0) {
David Sterba86a6be32019-03-20 15:31:28 +01001367 generic_err(leaf, 0,
Qu Wenruof556faa2018-09-28 07:59:34 +08001368 "invalid level for leaf, have %d expect 0",
1369 btrfs_header_level(leaf));
1370 return -EUCLEAN;
1371 }
1372
Qu Wenruo557ea5d2017-10-09 01:51:02 +00001373 /*
1374 * Extent buffers from a relocation tree have a owner field that
1375 * corresponds to the subvolume tree they are based on. So just from an
1376 * extent buffer alone we can not find out what is the id of the
1377 * corresponding subvolume tree, so we can not figure out if the extent
1378 * buffer corresponds to the root of the relocation tree or not. So
1379 * skip this check for relocation trees.
1380 */
1381 if (nritems == 0 && !btrfs_header_flag(leaf, BTRFS_HEADER_FLAG_RELOC)) {
Qu Wenruoba480dd2018-07-03 17:10:06 +08001382 u64 owner = btrfs_header_owner(leaf);
Qu Wenruo557ea5d2017-10-09 01:51:02 +00001383
Qu Wenruoba480dd2018-07-03 17:10:06 +08001384 /* These trees must never be empty */
1385 if (owner == BTRFS_ROOT_TREE_OBJECTID ||
1386 owner == BTRFS_CHUNK_TREE_OBJECTID ||
1387 owner == BTRFS_EXTENT_TREE_OBJECTID ||
1388 owner == BTRFS_DEV_TREE_OBJECTID ||
1389 owner == BTRFS_FS_TREE_OBJECTID ||
1390 owner == BTRFS_DATA_RELOC_TREE_OBJECTID) {
David Sterba86a6be32019-03-20 15:31:28 +01001391 generic_err(leaf, 0,
Qu Wenruoba480dd2018-07-03 17:10:06 +08001392 "invalid root, root %llu must never be empty",
1393 owner);
1394 return -EUCLEAN;
1395 }
Qu Wenruo62fdaa52019-08-22 10:14:15 +08001396 /* Unknown tree */
1397 if (owner == 0) {
1398 generic_err(leaf, 0,
1399 "invalid owner, root 0 is not defined");
1400 return -EUCLEAN;
1401 }
Qu Wenruo557ea5d2017-10-09 01:51:02 +00001402 return 0;
1403 }
1404
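	/*
	 * All empty non-relocation leaves were handled (accepted or
	 * rejected) above, so an empty leaf reaching this point must come
	 * from a relocation tree and is accepted.
	 */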
1405 if (nritems == 0)
1406 return 0;
1407
1408 /*
 1409	 * Check the following things to make sure this is a good leaf, so
 1410	 * that leaf users won't need to bother with similar sanity checks:
1411 *
1412 * 1) key ordering
1413 * 2) item offset and size
1414 * No overlap, no hole, all inside the leaf.
1415 * 3) item content
 1416	 * If possible, do a comprehensive sanity check.
1417 * NOTE: All checks must only rely on the item data itself.
1418 */
1419 for (slot = 0; slot < nritems; slot++) {
1420 u32 item_end_expected;
1421 int ret;
1422
1423 btrfs_item_key_to_cpu(leaf, &key, slot);
1424
1425 /* Make sure the keys are in the right order */
1426 if (btrfs_comp_cpu_keys(&prev_key, &key) >= 0) {
David Sterba86a6be32019-03-20 15:31:28 +01001427 generic_err(leaf, slot,
Qu Wenruo478d01b2017-10-09 01:51:04 +00001428 "bad key order, prev (%llu %u %llu) current (%llu %u %llu)",
1429 prev_key.objectid, prev_key.type,
1430 prev_key.offset, key.objectid, key.type,
1431 key.offset);
Qu Wenruo557ea5d2017-10-09 01:51:02 +00001432 return -EUCLEAN;
1433 }
1434
1435 /*
 1436	 * Make sure the offsets and ends are right; remember that the
1437 * item data starts at the end of the leaf and grows towards the
1438 * front.
1439 */
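		/*
		 * A rough sketch of the layout (not to scale):
		 *
		 * | header | item 0 | .. | item N | free | data N | .. | data 0 |
		 *
		 * Item offsets are relative to the start of the data area, so
		 * in a hole and overlap free leaf item_end(0) equals
		 * BTRFS_LEAF_DATA_SIZE and item_end(n) equals
		 * item_offset(n - 1) for every later slot, which is exactly
		 * what is verified here.
		 */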
1440 if (slot == 0)
1441 item_end_expected = BTRFS_LEAF_DATA_SIZE(fs_info);
1442 else
1443 item_end_expected = btrfs_item_offset_nr(leaf,
1444 slot - 1);
1445 if (btrfs_item_end_nr(leaf, slot) != item_end_expected) {
David Sterba86a6be32019-03-20 15:31:28 +01001446 generic_err(leaf, slot,
Qu Wenruo478d01b2017-10-09 01:51:04 +00001447 "unexpected item end, have %u expect %u",
1448 btrfs_item_end_nr(leaf, slot),
1449 item_end_expected);
Qu Wenruo557ea5d2017-10-09 01:51:02 +00001450 return -EUCLEAN;
1451 }
1452
1453 /*
1454 * Check to make sure that we don't point outside of the leaf,
 1455	 * just in case all the items are consistent with each other but
 1456	 * collectively point outside of the leaf.
1457 */
1458 if (btrfs_item_end_nr(leaf, slot) >
1459 BTRFS_LEAF_DATA_SIZE(fs_info)) {
David Sterba86a6be32019-03-20 15:31:28 +01001460 generic_err(leaf, slot,
Qu Wenruo478d01b2017-10-09 01:51:04 +00001461 "slot end outside of leaf, have %u expect range [0, %u]",
1462 btrfs_item_end_nr(leaf, slot),
1463 BTRFS_LEAF_DATA_SIZE(fs_info));
Qu Wenruo557ea5d2017-10-09 01:51:02 +00001464 return -EUCLEAN;
1465 }
1466
1467 /* Also check if the item pointer overlaps with btrfs item. */
1468 if (btrfs_item_nr_offset(slot) + sizeof(struct btrfs_item) >
1469 btrfs_item_ptr_offset(leaf, slot)) {
David Sterba86a6be32019-03-20 15:31:28 +01001470 generic_err(leaf, slot,
Qu Wenruo478d01b2017-10-09 01:51:04 +00001471 "slot overlaps with its data, item end %lu data start %lu",
1472 btrfs_item_nr_offset(slot) +
1473 sizeof(struct btrfs_item),
1474 btrfs_item_ptr_offset(leaf, slot));
Qu Wenruo557ea5d2017-10-09 01:51:02 +00001475 return -EUCLEAN;
1476 }
1477
Qu Wenruo69fc6cb2017-11-08 08:54:24 +08001478 if (check_item_data) {
1479 /*
1480 * Check if the item size and content meet other
1481 * criteria
1482 */
Filipe Manana4e9845e2019-05-06 16:44:12 +01001483 ret = check_leaf_item(leaf, &key, slot, &prev_key);
Qu Wenruo69fc6cb2017-11-08 08:54:24 +08001484 if (ret < 0)
1485 return ret;
1486 }
Qu Wenruo557ea5d2017-10-09 01:51:02 +00001487
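		/*
		 * Remember this key: it feeds the ordering check above and
		 * the item checkers that take prev_key (extent data items,
		 * dir items/indexes and inode refs).
		 */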
1488 prev_key.objectid = key.objectid;
1489 prev_key.type = key.type;
1490 prev_key.offset = key.offset;
1491 }
1492
1493 return 0;
1494}
1495
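/*
 * The two exported leaf checkers below differ only in depth: the full
 * variant also validates the content of every item, while the relaxed
 * variant stops after the structural checks (key order, item offsets and
 * sizes). Which one a caller picks depends on how much of the leaf is
 * expected to be finalized at that point.
 */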
David Sterba1c4360e2019-03-20 16:23:29 +01001496int btrfs_check_leaf_full(struct extent_buffer *leaf)
Qu Wenruo69fc6cb2017-11-08 08:54:24 +08001497{
David Sterbae2ccd362019-03-20 16:22:58 +01001498 return check_leaf(leaf, true);
Qu Wenruo69fc6cb2017-11-08 08:54:24 +08001499}
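/*
 * Tag this checker (and btrfs_check_node() below) for the kernel's
 * error-injection framework, so tests can force an error return.
 */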
Qu Wenruo02529d72019-04-24 15:22:53 +08001500ALLOW_ERROR_INJECTION(btrfs_check_leaf_full, ERRNO);
Qu Wenruo69fc6cb2017-11-08 08:54:24 +08001501
David Sterbacfdaad52019-03-20 16:24:18 +01001502int btrfs_check_leaf_relaxed(struct extent_buffer *leaf)
Qu Wenruo69fc6cb2017-11-08 08:54:24 +08001503{
David Sterbae2ccd362019-03-20 16:22:58 +01001504 return check_leaf(leaf, false);
Qu Wenruo69fc6cb2017-11-08 08:54:24 +08001505}
1506
David Sterba813fd1d2019-03-20 16:25:00 +01001507int btrfs_check_node(struct extent_buffer *node)
Qu Wenruo557ea5d2017-10-09 01:51:02 +00001508{
David Sterba813fd1d2019-03-20 16:25:00 +01001509 struct btrfs_fs_info *fs_info = node->fs_info;
Qu Wenruo557ea5d2017-10-09 01:51:02 +00001510 unsigned long nr = btrfs_header_nritems(node);
1511 struct btrfs_key key, next_key;
1512 int slot;
Qu Wenruof556faa2018-09-28 07:59:34 +08001513 int level = btrfs_header_level(node);
Qu Wenruo557ea5d2017-10-09 01:51:02 +00001514 u64 bytenr;
1515 int ret = 0;
1516
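	/*
	 * A node stores an array of (key, block pointer, generation)
	 * triples; the checks below cover the level, the item count,
	 * pointer alignment and key ordering.
	 */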
Qu Wenruof556faa2018-09-28 07:59:34 +08001517 if (level <= 0 || level >= BTRFS_MAX_LEVEL) {
David Sterba86a6be32019-03-20 15:31:28 +01001518 generic_err(node, 0,
Qu Wenruof556faa2018-09-28 07:59:34 +08001519 "invalid level for node, have %d expect [1, %d]",
1520 level, BTRFS_MAX_LEVEL - 1);
1521 return -EUCLEAN;
1522 }
Qu Wenruo2f659542018-01-25 14:56:18 +08001523 if (nr == 0 || nr > BTRFS_NODEPTRS_PER_BLOCK(fs_info)) {
1524 btrfs_crit(fs_info,
Qu Wenruobba4f292017-10-09 01:51:03 +00001525"corrupt node: root=%llu block=%llu, nritems too %s, have %lu expect range [1,%u]",
Qu Wenruo2f659542018-01-25 14:56:18 +08001526 btrfs_header_owner(node), node->start,
Qu Wenruobba4f292017-10-09 01:51:03 +00001527 nr == 0 ? "small" : "large", nr,
Qu Wenruo2f659542018-01-25 14:56:18 +08001528 BTRFS_NODEPTRS_PER_BLOCK(fs_info));
Qu Wenruobba4f292017-10-09 01:51:03 +00001529 return -EUCLEAN;
Qu Wenruo557ea5d2017-10-09 01:51:02 +00001530 }
1531
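	/*
	 * Compare each key with its successor, hence slot only runs up to
	 * nr - 2; note this also means the block pointer of the last slot
	 * is not validated here.
	 */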
1532 for (slot = 0; slot < nr - 1; slot++) {
1533 bytenr = btrfs_node_blockptr(node, slot);
1534 btrfs_node_key_to_cpu(node, &key, slot);
1535 btrfs_node_key_to_cpu(node, &next_key, slot + 1);
1536
1537 if (!bytenr) {
David Sterba86a6be32019-03-20 15:31:28 +01001538 generic_err(node, slot,
Qu Wenruobba4f292017-10-09 01:51:03 +00001539 "invalid NULL node pointer");
1540 ret = -EUCLEAN;
1541 goto out;
1542 }
Qu Wenruo2f659542018-01-25 14:56:18 +08001543 if (!IS_ALIGNED(bytenr, fs_info->sectorsize)) {
David Sterba86a6be32019-03-20 15:31:28 +01001544 generic_err(node, slot,
Qu Wenruobba4f292017-10-09 01:51:03 +00001545 "unaligned pointer, have %llu should be aligned to %u",
Qu Wenruo2f659542018-01-25 14:56:18 +08001546 bytenr, fs_info->sectorsize);
Qu Wenruobba4f292017-10-09 01:51:03 +00001547 ret = -EUCLEAN;
Qu Wenruo557ea5d2017-10-09 01:51:02 +00001548 goto out;
1549 }
1550
1551 if (btrfs_comp_cpu_keys(&key, &next_key) >= 0) {
David Sterba86a6be32019-03-20 15:31:28 +01001552 generic_err(node, slot,
Qu Wenruobba4f292017-10-09 01:51:03 +00001553 "bad key order, current (%llu %u %llu) next (%llu %u %llu)",
1554 key.objectid, key.type, key.offset,
1555 next_key.objectid, next_key.type,
1556 next_key.offset);
1557 ret = -EUCLEAN;
Qu Wenruo557ea5d2017-10-09 01:51:02 +00001558 goto out;
1559 }
1560 }
1561out:
1562 return ret;
1563}
Qu Wenruo02529d72019-04-24 15:22:53 +08001564ALLOW_ERROR_INJECTION(btrfs_check_node, ERRNO);