// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2011, 2012 STRATO.  All rights reserved.
 */

#include <linux/blkdev.h>
#include <linux/ratelimit.h>
#include <linux/sched/mm.h>
#include <crypto/hash.h>
#include "ctree.h"
#include "discard.h"
#include "volumes.h"
#include "disk-io.h"
#include "ordered-data.h"
#include "transaction.h"
#include "backref.h"
#include "extent_io.h"
#include "dev-replace.h"
#include "check-integrity.h"
#include "rcu-string.h"
#include "raid56.h"
#include "block-group.h"
#include "zoned.h"

/*
 * This is only the first step towards a full-featured scrub. It reads all
 * extents and super blocks and verifies the checksums. In case a bad checksum
 * is found or the extent cannot be read, good data will be written back if
 * any can be found.
 *
 * Future enhancements:
 *  - In case an unrepairable extent is encountered, track which files are
 *    affected and report them
 *  - track and record media errors, throw out bad devices
 *  - add a mode to also read unallocated space
 */

struct scrub_block;
struct scrub_ctx;

/*
 * The following three values only influence performance.
 * The last one configures the number of parallel and outstanding I/O
 * operations. The first two values configure an upper limit for the number
 * of (dynamically allocated) pages that are added to a bio.
 */
#define SCRUB_PAGES_PER_RD_BIO	32	/* 128k per bio */
#define SCRUB_PAGES_PER_WR_BIO	32	/* 128k per bio */
#define SCRUB_BIOS_PER_SCTX	64	/* 8MB per device in flight */

/*
 * The following value times PAGE_SIZE needs to be large enough to match the
 * largest node/leaf/sector size that shall be supported.
 * Values larger than BTRFS_STRIPE_LEN are not supported.
 */
#define SCRUB_MAX_PAGES_PER_BLOCK	16	/* 64k per node/leaf/sector */

struct scrub_recover {
	refcount_t		refs;
	struct btrfs_io_context	*bioc;
	u64			map_length;
};

struct scrub_page {
	struct scrub_block	*sblock;
	struct page		*page;
	struct btrfs_device	*dev;
	struct list_head	list;
	u64			flags;  /* extent flags */
	u64			generation;
	u64			logical;
	u64			physical;
	u64			physical_for_dev_replace;
	atomic_t		refs;
	u8			mirror_num;
	unsigned int		have_csum:1;
	unsigned int		io_error:1;
	u8			csum[BTRFS_CSUM_SIZE];

	struct scrub_recover	*recover;
};

struct scrub_bio {
	int			index;
	struct scrub_ctx	*sctx;
	struct btrfs_device	*dev;
	struct bio		*bio;
	blk_status_t		status;
	u64			logical;
	u64			physical;
#if SCRUB_PAGES_PER_WR_BIO >= SCRUB_PAGES_PER_RD_BIO
	struct scrub_page	*pagev[SCRUB_PAGES_PER_WR_BIO];
#else
	struct scrub_page	*pagev[SCRUB_PAGES_PER_RD_BIO];
#endif
	int			page_count;
	int			next_free;
	struct btrfs_work	work;
};

struct scrub_block {
	struct scrub_page	*pagev[SCRUB_MAX_PAGES_PER_BLOCK];
	int			page_count;
	atomic_t		outstanding_pages;
	refcount_t		refs;	/* free mem on transition to zero */
	struct scrub_ctx	*sctx;
	struct scrub_parity	*sparity;
	struct {
		unsigned int	header_error:1;
		unsigned int	checksum_error:1;
		unsigned int	no_io_error_seen:1;
		unsigned int	generation_error:1; /* also sets header_error */

		/* The following is for the data used to check parity */
		/* It is for the data with checksum */
		unsigned int	data_corrected:1;
	};
	struct btrfs_work	work;
};

/* Used for the chunks with parity stripe such as RAID5/6 */
struct scrub_parity {
	struct scrub_ctx	*sctx;

	struct btrfs_device	*scrub_dev;

	u64			logic_start;

	u64			logic_end;

	int			nsectors;

	u32			stripe_len;

	refcount_t		refs;

	struct list_head	spages;

	/* Work of parity check and repair */
	struct btrfs_work	work;

	/* Mark the parity blocks which have data */
	unsigned long		*dbitmap;

	/*
	 * Mark the parity blocks which have data, but errors happen when
	 * reading or checking the data
	 */
	unsigned long		*ebitmap;

	unsigned long		bitmap[];
};

struct scrub_ctx {
	struct scrub_bio	*bios[SCRUB_BIOS_PER_SCTX];
	struct btrfs_fs_info	*fs_info;
	int			first_free;
	int			curr;
	atomic_t		bios_in_flight;
	atomic_t		workers_pending;
	spinlock_t		list_lock;
	wait_queue_head_t	list_wait;
	struct list_head	csum_list;
	atomic_t		cancel_req;
	int			readonly;
	int			pages_per_rd_bio;

	/* State of IO submission throttling affecting the associated device */
	ktime_t			throttle_deadline;
	u64			throttle_sent;

	int			is_dev_replace;
	u64			write_pointer;

	struct scrub_bio	*wr_curr_bio;
	struct mutex		wr_lock;
	int			pages_per_wr_bio; /* <= SCRUB_PAGES_PER_WR_BIO */
	struct btrfs_device	*wr_tgtdev;
	bool			flush_all_writes;

	/* statistics */
	struct btrfs_scrub_progress stat;
	spinlock_t		stat_lock;

	/*
	 * Use a ref counter to avoid use-after-free issues. Scrub workers
	 * decrement bios_in_flight and workers_pending and then do a wakeup
	 * on the list_wait wait queue. We must ensure the main scrub task
	 * doesn't free the scrub context before or while the workers are
	 * doing the wakeup() call.
	 */
	refcount_t		refs;
};

struct scrub_warning {
	struct btrfs_path	*path;
	u64			extent_item_size;
	const char		*errstr;
	u64			physical;
	u64			logical;
	struct btrfs_device	*dev;
};

struct full_stripe_lock {
	struct rb_node	node;
	u64		logical;
	u64		refs;
	struct mutex	mutex;
};

static int scrub_setup_recheck_block(struct scrub_block *original_sblock,
				     struct scrub_block *sblocks_for_recheck);
static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
				struct scrub_block *sblock,
				int retry_failed_mirror);
static void scrub_recheck_block_checksum(struct scrub_block *sblock);
static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
					     struct scrub_block *sblock_good);
static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
					    struct scrub_block *sblock_good,
					    int page_num, int force_write);
static void scrub_write_block_to_dev_replace(struct scrub_block *sblock);
static int scrub_write_page_to_dev_replace(struct scrub_block *sblock,
					   int page_num);
static int scrub_checksum_data(struct scrub_block *sblock);
static int scrub_checksum_tree_block(struct scrub_block *sblock);
static int scrub_checksum_super(struct scrub_block *sblock);
static void scrub_block_put(struct scrub_block *sblock);
static void scrub_page_get(struct scrub_page *spage);
static void scrub_page_put(struct scrub_page *spage);
static void scrub_parity_get(struct scrub_parity *sparity);
static void scrub_parity_put(struct scrub_parity *sparity);
static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u32 len,
		       u64 physical, struct btrfs_device *dev, u64 flags,
		       u64 gen, int mirror_num, u8 *csum,
		       u64 physical_for_dev_replace);
static void scrub_bio_end_io(struct bio *bio);
static void scrub_bio_end_io_worker(struct btrfs_work *work);
static void scrub_block_complete(struct scrub_block *sblock);
static void scrub_remap_extent(struct btrfs_fs_info *fs_info,
			       u64 extent_logical, u32 extent_len,
			       u64 *extent_physical,
			       struct btrfs_device **extent_dev,
			       int *extent_mirror_num);
static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
				    struct scrub_page *spage);
static void scrub_wr_submit(struct scrub_ctx *sctx);
static void scrub_wr_bio_end_io(struct bio *bio);
static void scrub_wr_bio_end_io_worker(struct btrfs_work *work);
static void scrub_put_ctx(struct scrub_ctx *sctx);

static inline int scrub_is_page_on_raid56(struct scrub_page *spage)
{
	return spage->recover &&
	       (spage->recover->bioc->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK);
}

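/*
 * Pin the scrub context while a bio is in flight: the refcount taken here
 * is dropped again in scrub_pending_bio_dec() via scrub_put_ctx(), so the
 * context cannot be freed while the wakeup on list_wait is still pending.
 */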
static void scrub_pending_bio_inc(struct scrub_ctx *sctx)
{
	refcount_inc(&sctx->refs);
	atomic_inc(&sctx->bios_in_flight);
}

static void scrub_pending_bio_dec(struct scrub_ctx *sctx)
{
	atomic_dec(&sctx->bios_in_flight);
	wake_up(&sctx->list_wait);
	scrub_put_ctx(sctx);
}

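/*
 * Wait until any pending pause request is withdrawn. The scrub_lock is
 * dropped while sleeping so that whoever raised scrub_pause_req can make
 * progress, and is re-taken before returning.
 */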
static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
{
	while (atomic_read(&fs_info->scrub_pause_req)) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
			   atomic_read(&fs_info->scrub_pause_req) == 0);
		mutex_lock(&fs_info->scrub_lock);
	}
}

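/*
 * scrub_pause_on() and scrub_pause_off() are used in pairs: the first marks
 * this scrub as paused and wakes up anyone waiting for that state, the
 * second blocks until a pending pause request (if any) is gone and then
 * clears the paused state again.
 */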
static void scrub_pause_on(struct btrfs_fs_info *fs_info)
{
	atomic_inc(&fs_info->scrubs_paused);
	wake_up(&fs_info->scrub_pause_wait);
}

static void scrub_pause_off(struct btrfs_fs_info *fs_info)
{
	mutex_lock(&fs_info->scrub_lock);
	__scrub_blocked_if_needed(fs_info);
	atomic_dec(&fs_info->scrubs_paused);
	mutex_unlock(&fs_info->scrub_lock);

	wake_up(&fs_info->scrub_pause_wait);
}

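/*
 * Convenience helper: briefly enter the paused state so that a pending
 * pause request can be served, then immediately continue.
 */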
static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
{
	scrub_pause_on(fs_info);
	scrub_pause_off(fs_info);
}

/*
 * Insert new full stripe lock into full stripe locks tree
 *
 * Return pointer to existing or newly inserted full_stripe_lock structure if
 * everything works well.
 * Return ERR_PTR(-ENOMEM) if we failed to allocate memory
 *
 * NOTE: caller must hold full_stripe_locks_root->lock before calling this
 * function
 */
static struct full_stripe_lock *insert_full_stripe_lock(
		struct btrfs_full_stripe_locks_tree *locks_root,
		u64 fstripe_logical)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct full_stripe_lock *entry;
	struct full_stripe_lock *ret;

	lockdep_assert_held(&locks_root->lock);

	p = &locks_root->root.rb_node;
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct full_stripe_lock, node);
		if (fstripe_logical < entry->logical) {
			p = &(*p)->rb_left;
		} else if (fstripe_logical > entry->logical) {
			p = &(*p)->rb_right;
		} else {
			entry->refs++;
			return entry;
		}
	}

	/* Insert new lock */
	ret = kmalloc(sizeof(*ret), GFP_KERNEL);
	if (!ret)
		return ERR_PTR(-ENOMEM);
	ret->logical = fstripe_logical;
	ret->refs = 1;
	mutex_init(&ret->mutex);

	rb_link_node(&ret->node, parent, p);
	rb_insert_color(&ret->node, &locks_root->root);
	return ret;
}

/*
 * Search for a full stripe lock of a block group
 *
 * Return pointer to existing full stripe lock if found
 * Return NULL if not found
 */
static struct full_stripe_lock *search_full_stripe_lock(
		struct btrfs_full_stripe_locks_tree *locks_root,
		u64 fstripe_logical)
{
	struct rb_node *node;
	struct full_stripe_lock *entry;

	lockdep_assert_held(&locks_root->lock);

	node = locks_root->root.rb_node;
	while (node) {
		entry = rb_entry(node, struct full_stripe_lock, node);
		if (fstripe_logical < entry->logical)
			node = node->rb_left;
		else if (fstripe_logical > entry->logical)
			node = node->rb_right;
		else
			return entry;
	}
	return NULL;
}

/*
 * Helper to get full stripe logical from a normal bytenr.
 *
 * Caller must ensure @cache is a RAID56 block group.
 */
static u64 get_full_stripe_logical(struct btrfs_block_group *cache, u64 bytenr)
{
	u64 ret;

	/*
	 * Due to chunk item size limit, full stripe length should not be
	 * larger than U32_MAX. Just a sanity check here.
	 */
	WARN_ON_ONCE(cache->full_stripe_len >= U32_MAX);

	/*
	 * round_down() can only handle power of 2, while RAID56 full
	 * stripe length can be 64KiB * n, so we need to manually round down.
	 */
	ret = div64_u64(bytenr - cache->start, cache->full_stripe_len) *
			cache->full_stripe_len + cache->start;
	return ret;
}
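
/*
 * Worked example for the rounding above (illustrative numbers only): with
 * cache->start = 1MiB and full_stripe_len = 192KiB (three 64KiB data
 * stripes, not a power of 2), a bytenr of 1MiB + 200KiB gives
 * div64_u64(200KiB, 192KiB) = 1, so the containing full stripe starts at
 * 1MiB + 192KiB.
 */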

/*
 * Lock a full stripe to avoid concurrency of recovery and read
 *
 * It's only used for profiles with parities (RAID5/6), for other profiles it
 * does nothing.
 *
 * Return 0 if we locked full stripe covering @bytenr, with a mutex held.
 * So caller must call unlock_full_stripe() in the same context.
 *
 * Return <0 if we encounter an error.
 */
static int lock_full_stripe(struct btrfs_fs_info *fs_info, u64 bytenr,
			    bool *locked_ret)
{
	struct btrfs_block_group *bg_cache;
	struct btrfs_full_stripe_locks_tree *locks_root;
	struct full_stripe_lock *existing;
	u64 fstripe_start;
	int ret = 0;

	*locked_ret = false;
	bg_cache = btrfs_lookup_block_group(fs_info, bytenr);
	if (!bg_cache) {
		ASSERT(0);
		return -ENOENT;
	}

	/* Profiles not based on parity don't need full stripe lock */
	if (!(bg_cache->flags & BTRFS_BLOCK_GROUP_RAID56_MASK))
		goto out;
	locks_root = &bg_cache->full_stripe_locks_root;

	fstripe_start = get_full_stripe_logical(bg_cache, bytenr);

	/* Now insert the full stripe lock */
	mutex_lock(&locks_root->lock);
	existing = insert_full_stripe_lock(locks_root, fstripe_start);
	mutex_unlock(&locks_root->lock);
	if (IS_ERR(existing)) {
		ret = PTR_ERR(existing);
		goto out;
	}
	mutex_lock(&existing->mutex);
	*locked_ret = true;
out:
	btrfs_put_block_group(bg_cache);
	return ret;
}

/*
 * Unlock a full stripe.
 *
 * NOTE: Caller must ensure it's the same context calling corresponding
 * lock_full_stripe().
 *
 * Return 0 if we unlock full stripe without problem.
 * Return <0 for error
 */
static int unlock_full_stripe(struct btrfs_fs_info *fs_info, u64 bytenr,
			      bool locked)
{
	struct btrfs_block_group *bg_cache;
	struct btrfs_full_stripe_locks_tree *locks_root;
	struct full_stripe_lock *fstripe_lock;
	u64 fstripe_start;
	bool freeit = false;
	int ret = 0;

	/* If we didn't acquire full stripe lock, no need to continue */
	if (!locked)
		return 0;

	bg_cache = btrfs_lookup_block_group(fs_info, bytenr);
	if (!bg_cache) {
		ASSERT(0);
		return -ENOENT;
	}
	if (!(bg_cache->flags & BTRFS_BLOCK_GROUP_RAID56_MASK))
		goto out;

	locks_root = &bg_cache->full_stripe_locks_root;
	fstripe_start = get_full_stripe_logical(bg_cache, bytenr);

	mutex_lock(&locks_root->lock);
	fstripe_lock = search_full_stripe_lock(locks_root, fstripe_start);
	/* Unpaired unlock_full_stripe() detected */
	if (!fstripe_lock) {
		WARN_ON(1);
		ret = -ENOENT;
		mutex_unlock(&locks_root->lock);
		goto out;
	}

	if (fstripe_lock->refs == 0) {
		WARN_ON(1);
		btrfs_warn(fs_info, "full stripe lock at %llu refcount underflow",
			fstripe_lock->logical);
	} else {
		fstripe_lock->refs--;
	}

	if (fstripe_lock->refs == 0) {
		rb_erase(&fstripe_lock->node, &locks_root->root);
		freeit = true;
	}
	mutex_unlock(&locks_root->lock);

	mutex_unlock(&fstripe_lock->mutex);
	if (freeit)
		kfree(fstripe_lock);
out:
	btrfs_put_block_group(bg_cache);
	return ret;
}
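
/*
 * A minimal sketch of the intended lock/unlock pairing (see also the use in
 * scrub_handle_errored_block() below); handle_block() stands in for whatever
 * work needs the full stripe to stay stable:
 *
 *	bool locked;
 *	int ret;
 *
 *	ret = lock_full_stripe(fs_info, logical, &locked);
 *	if (ret < 0)
 *		return ret;
 *	ret = handle_block(...);
 *	unlock_full_stripe(fs_info, logical, locked);
 *	return ret;
 */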

static void scrub_free_csums(struct scrub_ctx *sctx)
{
	while (!list_empty(&sctx->csum_list)) {
		struct btrfs_ordered_sum *sum;
		sum = list_first_entry(&sctx->csum_list,
				       struct btrfs_ordered_sum, list);
		list_del(&sum->list);
		kfree(sum);
	}
}

static noinline_for_stack void scrub_free_ctx(struct scrub_ctx *sctx)
{
	int i;

	if (!sctx)
		return;

	/* this can happen when scrub is cancelled */
	if (sctx->curr != -1) {
		struct scrub_bio *sbio = sctx->bios[sctx->curr];

		for (i = 0; i < sbio->page_count; i++) {
			WARN_ON(!sbio->pagev[i]->page);
			scrub_block_put(sbio->pagev[i]->sblock);
		}
		bio_put(sbio->bio);
	}

	for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) {
		struct scrub_bio *sbio = sctx->bios[i];

		if (!sbio)
			break;
		kfree(sbio);
	}

	kfree(sctx->wr_curr_bio);
	scrub_free_csums(sctx);
	kfree(sctx);
}

static void scrub_put_ctx(struct scrub_ctx *sctx)
{
	if (refcount_dec_and_test(&sctx->refs))
		scrub_free_ctx(sctx);
}

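/*
 * Allocate and initialize a scrub context: the fixed array of scrub_bios is
 * pre-allocated here and linked into a free list via next_free, so the hot
 * path never has to allocate a scrub_bio. For dev-replace, the write target
 * is taken from fs_info->dev_replace.tgtdev.
 */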
static noinline_for_stack struct scrub_ctx *scrub_setup_ctx(
		struct btrfs_fs_info *fs_info, int is_dev_replace)
{
	struct scrub_ctx *sctx;
	int i;

	sctx = kzalloc(sizeof(*sctx), GFP_KERNEL);
	if (!sctx)
		goto nomem;
	refcount_set(&sctx->refs, 1);
	sctx->is_dev_replace = is_dev_replace;
	sctx->pages_per_rd_bio = SCRUB_PAGES_PER_RD_BIO;
	sctx->curr = -1;
	sctx->fs_info = fs_info;
	INIT_LIST_HEAD(&sctx->csum_list);
	for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) {
		struct scrub_bio *sbio;

		sbio = kzalloc(sizeof(*sbio), GFP_KERNEL);
		if (!sbio)
			goto nomem;
		sctx->bios[i] = sbio;

		sbio->index = i;
		sbio->sctx = sctx;
		sbio->page_count = 0;
		btrfs_init_work(&sbio->work, scrub_bio_end_io_worker, NULL,
				NULL);

		if (i != SCRUB_BIOS_PER_SCTX - 1)
			sctx->bios[i]->next_free = i + 1;
		else
			sctx->bios[i]->next_free = -1;
	}
	sctx->first_free = 0;
	atomic_set(&sctx->bios_in_flight, 0);
	atomic_set(&sctx->workers_pending, 0);
	atomic_set(&sctx->cancel_req, 0);

	spin_lock_init(&sctx->list_lock);
	spin_lock_init(&sctx->stat_lock);
	init_waitqueue_head(&sctx->list_wait);
	sctx->throttle_deadline = 0;

	WARN_ON(sctx->wr_curr_bio != NULL);
	mutex_init(&sctx->wr_lock);
	sctx->wr_curr_bio = NULL;
	if (is_dev_replace) {
		WARN_ON(!fs_info->dev_replace.tgtdev);
		sctx->pages_per_wr_bio = SCRUB_PAGES_PER_WR_BIO;
		sctx->wr_tgtdev = fs_info->dev_replace.tgtdev;
		sctx->flush_all_writes = false;
	}

	return sctx;

nomem:
	scrub_free_ctx(sctx);
	return ERR_PTR(-ENOMEM);
}

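/*
 * Backref walking callback used by scrub_print_warning(): for each inode
 * that references the corrupted extent, resolve the inode to file paths and
 * print one warning line per path found.
 */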
static int scrub_print_warning_inode(u64 inum, u64 offset, u64 root,
				     void *warn_ctx)
{
	u32 nlink;
	int ret;
	int i;
	unsigned nofs_flag;
	struct extent_buffer *eb;
	struct btrfs_inode_item *inode_item;
	struct scrub_warning *swarn = warn_ctx;
	struct btrfs_fs_info *fs_info = swarn->dev->fs_info;
	struct inode_fs_paths *ipath = NULL;
	struct btrfs_root *local_root;
	struct btrfs_key key;

	local_root = btrfs_get_fs_root(fs_info, root, true);
	if (IS_ERR(local_root)) {
		ret = PTR_ERR(local_root);
		goto err;
	}

	/*
	 * This makes the path point to (inum INODE_ITEM ioff)
	 */
	key.objectid = inum;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;

	ret = btrfs_search_slot(NULL, local_root, &key, swarn->path, 0, 0);
	if (ret) {
		btrfs_put_root(local_root);
		btrfs_release_path(swarn->path);
		goto err;
	}

	eb = swarn->path->nodes[0];
	inode_item = btrfs_item_ptr(eb, swarn->path->slots[0],
					struct btrfs_inode_item);
	nlink = btrfs_inode_nlink(eb, inode_item);
	btrfs_release_path(swarn->path);

	/*
	 * init_path might indirectly call vmalloc, or use GFP_KERNEL. Scrub
	 * uses GFP_NOFS in this context, so we keep it consistent but it does
	 * not seem to be strictly necessary.
	 */
	nofs_flag = memalloc_nofs_save();
	ipath = init_ipath(4096, local_root, swarn->path);
	memalloc_nofs_restore(nofs_flag);
	if (IS_ERR(ipath)) {
		btrfs_put_root(local_root);
		ret = PTR_ERR(ipath);
		ipath = NULL;
		goto err;
	}
	ret = paths_from_inode(inum, ipath);

	if (ret < 0)
		goto err;

	/*
	 * We deliberately ignore the fact that ipath might have been too
	 * small to hold all of the paths here.
	 */
	for (i = 0; i < ipath->fspath->elem_cnt; ++i)
		btrfs_warn_in_rcu(fs_info,
"%s at logical %llu on dev %s, physical %llu, root %llu, inode %llu, offset %llu, length %u, links %u (path: %s)",
				  swarn->errstr, swarn->logical,
				  rcu_str_deref(swarn->dev->name),
				  swarn->physical,
				  root, inum, offset,
				  fs_info->sectorsize, nlink,
				  (char *)(unsigned long)ipath->fspath->val[i]);

	btrfs_put_root(local_root);
	free_ipath(ipath);
	return 0;

err:
	btrfs_warn_in_rcu(fs_info,
			  "%s at logical %llu on dev %s, physical %llu, root %llu, inode %llu, offset %llu: path resolving failed with ret=%d",
			  swarn->errstr, swarn->logical,
			  rcu_str_deref(swarn->dev->name),
			  swarn->physical,
			  root, inum, offset, ret);

	free_ipath(ipath);
	return 0;
}

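/*
 * Report a checksum/read error to the log. For metadata the tree backrefs
 * are printed directly; for data extents the inodes referencing the extent
 * are walked via iterate_extent_inodes() and reported through
 * scrub_print_warning_inode().
 */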
static void scrub_print_warning(const char *errstr, struct scrub_block *sblock)
{
	struct btrfs_device *dev;
	struct btrfs_fs_info *fs_info;
	struct btrfs_path *path;
	struct btrfs_key found_key;
	struct extent_buffer *eb;
	struct btrfs_extent_item *ei;
	struct scrub_warning swarn;
	unsigned long ptr = 0;
	u64 extent_item_pos;
	u64 flags = 0;
	u64 ref_root;
	u32 item_size;
	u8 ref_level = 0;
	int ret;

	WARN_ON(sblock->page_count < 1);
	dev = sblock->pagev[0]->dev;
	fs_info = sblock->sctx->fs_info;

	path = btrfs_alloc_path();
	if (!path)
		return;

	swarn.physical = sblock->pagev[0]->physical;
	swarn.logical = sblock->pagev[0]->logical;
	swarn.errstr = errstr;
	swarn.dev = NULL;

	ret = extent_from_logical(fs_info, swarn.logical, path, &found_key,
				  &flags);
	if (ret < 0)
		goto out;

	extent_item_pos = swarn.logical - found_key.objectid;
	swarn.extent_item_size = found_key.offset;

	eb = path->nodes[0];
	ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
	item_size = btrfs_item_size(eb, path->slots[0]);

	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		do {
			ret = tree_backref_for_extent(&ptr, eb, &found_key, ei,
						      item_size, &ref_root,
						      &ref_level);
			btrfs_warn_in_rcu(fs_info,
"%s at logical %llu on dev %s, physical %llu: metadata %s (level %d) in tree %llu",
				errstr, swarn.logical,
				rcu_str_deref(dev->name),
				swarn.physical,
				ref_level ? "node" : "leaf",
				ret < 0 ? -1 : ref_level,
				ret < 0 ? -1 : ref_root);
		} while (ret != 1);
		btrfs_release_path(path);
	} else {
		btrfs_release_path(path);
		swarn.path = path;
		swarn.dev = dev;
		iterate_extent_inodes(fs_info, found_key.objectid,
				      extent_item_pos, 1,
				      scrub_print_warning_inode, &swarn, false);
	}

out:
	btrfs_free_path(path);
}

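/*
 * Reference counting for scrub_recover: the last put drops the fs_info bio
 * counter and the btrfs_io_context reference held on behalf of the
 * recovery, then frees the structure.
 */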
static inline void scrub_get_recover(struct scrub_recover *recover)
{
	refcount_inc(&recover->refs);
}

static inline void scrub_put_recover(struct btrfs_fs_info *fs_info,
				     struct scrub_recover *recover)
{
	if (refcount_dec_and_test(&recover->refs)) {
		btrfs_bio_counter_dec(fs_info);
		btrfs_put_bioc(recover->bioc);
		kfree(recover);
	}
}

Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 806 | /* |
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 807 | * scrub_handle_errored_block gets called when either verification of the |
| 808 | * pages failed or the bio failed to read, e.g. with EIO. In the latter |
| 809 | * case, this function handles all pages in the bio, even though only one |
| 810 | * may be bad. |
| 811 | * The goal of this function is to repair the errored block by using the |
| 812 | * contents of one of the mirrors. |
Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 813 | */ |
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 814 | static int scrub_handle_errored_block(struct scrub_block *sblock_to_check) |
Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 815 | { |
Stefan Behrens | d9d181c | 2012-11-02 09:58:09 +0100 | [diff] [blame] | 816 | struct scrub_ctx *sctx = sblock_to_check->sctx; |
Stefan Behrens | a36cf8b | 2012-11-02 13:26:57 +0100 | [diff] [blame] | 817 | struct btrfs_device *dev; |
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 818 | struct btrfs_fs_info *fs_info; |
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 819 | u64 logical; |
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 820 | unsigned int failed_mirror_index; |
| 821 | unsigned int is_metadata; |
| 822 | unsigned int have_csum; |
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 823 | struct scrub_block *sblocks_for_recheck; /* holds one for each mirror */ |
| 824 | struct scrub_block *sblock_bad; |
Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 825 | int ret; |
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 826 | int mirror_index; |
| 827 | int page_num; |
| 828 | int success; |
Qu Wenruo | 28d70e2 | 2017-04-14 08:35:55 +0800 | [diff] [blame] | 829 | bool full_stripe_locked; |
Filipe Manana | 7c3c7cb | 2018-12-07 13:23:32 +0000 | [diff] [blame] | 830 | unsigned int nofs_flag; |
David Sterba | 8bb1cf1 | 2020-08-17 12:12:38 +0200 | [diff] [blame] | 831 | static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL, |
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 832 | DEFAULT_RATELIMIT_BURST); |
Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 833 | |
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 834 | BUG_ON(sblock_to_check->page_count < 1); |
Jeff Mahoney | fb45625 | 2016-06-22 18:54:56 -0400 | [diff] [blame] | 835 | fs_info = sctx->fs_info; |
Stefan Behrens | 4ded4f6 | 2012-11-14 18:57:29 +0000 | [diff] [blame] | 836 | if (sblock_to_check->pagev[0]->flags & BTRFS_EXTENT_FLAG_SUPER) { |
| 837 | /* |
| 838 | * if we find an error in a super block, we just report it. |
| 839 | * They will get written with the next transaction commit |
| 840 | * anyway |
| 841 | */ |
| 842 | spin_lock(&sctx->stat_lock); |
| 843 | ++sctx->stat.super_errors; |
| 844 | spin_unlock(&sctx->stat_lock); |
| 845 | return 0; |
| 846 | } |
Stefan Behrens | 7a9e998 | 2012-11-02 14:58:04 +0100 | [diff] [blame] | 847 | logical = sblock_to_check->pagev[0]->logical; |
Stefan Behrens | 7a9e998 | 2012-11-02 14:58:04 +0100 | [diff] [blame] | 848 | BUG_ON(sblock_to_check->pagev[0]->mirror_num < 1); |
| 849 | failed_mirror_index = sblock_to_check->pagev[0]->mirror_num - 1; |
| 850 | is_metadata = !(sblock_to_check->pagev[0]->flags & |
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 851 | BTRFS_EXTENT_FLAG_DATA); |
Stefan Behrens | 7a9e998 | 2012-11-02 14:58:04 +0100 | [diff] [blame] | 852 | have_csum = sblock_to_check->pagev[0]->have_csum; |
Stefan Behrens | 7a9e998 | 2012-11-02 14:58:04 +0100 | [diff] [blame] | 853 | dev = sblock_to_check->pagev[0]->dev; |
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 854 | |
Naohiro Aota | f7ef528 | 2021-02-04 19:22:16 +0900 | [diff] [blame] | 855 | if (btrfs_is_zoned(fs_info) && !sctx->is_dev_replace) |
| 856 | return btrfs_repair_one_zone(fs_info, logical); |
| 857 | |
Qu Wenruo | 28d70e2 | 2017-04-14 08:35:55 +0800 | [diff] [blame] | 858 | /* |
Filipe Manana | 7c3c7cb | 2018-12-07 13:23:32 +0000 | [diff] [blame] | 859 | * We must use GFP_NOFS because the scrub task might be waiting for a |
| 860 | * worker task executing this function and in turn a transaction commit |
| 861 | * might be waiting the scrub task to pause (which needs to wait for all |
| 862 | * the worker tasks to complete before pausing). |
| 863 | * We do allocations in the workers through insert_full_stripe_lock() |
| 864 | * and scrub_add_page_to_wr_bio(), which happens down the call chain of |
| 865 | * this function. |
| 866 | */ |
| 867 | nofs_flag = memalloc_nofs_save(); |
| 868 | /* |
Qu Wenruo | 28d70e2 | 2017-04-14 08:35:55 +0800 | [diff] [blame] | 869 | * For RAID5/6, race can happen for a different device scrub thread. |
| 870 | * For data corruption, Parity and Data threads will both try |
| 871 | * to recovery the data. |
| 872 | * Race can lead to doubly added csum error, or even unrecoverable |
| 873 | * error. |
| 874 | */ |
| 875 | ret = lock_full_stripe(fs_info, logical, &full_stripe_locked); |
| 876 | if (ret < 0) { |
Filipe Manana | 7c3c7cb | 2018-12-07 13:23:32 +0000 | [diff] [blame] | 877 | memalloc_nofs_restore(nofs_flag); |
Qu Wenruo | 28d70e2 | 2017-04-14 08:35:55 +0800 | [diff] [blame] | 878 | spin_lock(&sctx->stat_lock); |
| 879 | if (ret == -ENOMEM) |
| 880 | sctx->stat.malloc_errors++; |
| 881 | sctx->stat.read_errors++; |
| 882 | sctx->stat.uncorrectable_errors++; |
| 883 | spin_unlock(&sctx->stat_lock); |
| 884 | return ret; |
| 885 | } |
| 886 | |
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 887 | /* |
| 888 | * read all mirrors one after the other. This includes to |
| 889 | * re-read the extent or metadata block that failed (that was |
| 890 | * the cause that this fixup code is called) another time, |
Qu Wenruo | 8df507c | 2021-04-22 19:02:46 +0800 | [diff] [blame] | 891 | * sector by sector this time in order to know which sectors |
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 892 | * caused I/O errors and which ones are good (for all mirrors). |
| 893 | 	 * The goal is to handle the situation when more than one
| 894 | * mirror contains I/O errors, but the errors do not |
| 895 | * overlap, i.e. the data can be repaired by selecting the |
Qu Wenruo | 8df507c | 2021-04-22 19:02:46 +0800 | [diff] [blame] | 896 | * sectors from those mirrors without I/O error on the |
| 897 | * particular sectors. One example (with blocks >= 2 * sectorsize) |
| 898 | * would be that mirror #1 has an I/O error on the first sector, |
| 899 | * the second sector is good, and mirror #2 has an I/O error on |
| 900 | * the second sector, but the first sector is good. |
| 901 | * Then the first sector of the first mirror can be repaired by |
| 902 | * taking the first sector of the second mirror, and the |
| 903 | * second sector of the second mirror can be repaired by |
| 904 | * copying the contents of the 2nd sector of the 1st mirror. |
| 905 | * One more note: if the sectors of one mirror contain I/O |
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 906 | * errors, the checksum cannot be verified. In order to get |
| 907 | * the best data for repairing, the first attempt is to find |
| 908 | * a mirror without I/O errors and with a validated checksum. |
Qu Wenruo | 8df507c | 2021-04-22 19:02:46 +0800 | [diff] [blame] | 909 | 	 * Only if this is not possible are the sectors picked from
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 910 | * mirrors with I/O errors without considering the checksum. |
| 911 | * If the latter is the case, at the end, the checksum of the |
| 912 | * repaired area is verified in order to correctly maintain |
| 913 | * the statistics. |
| 914 | */ |
| 915 | |
David Sterba | 31e818f | 2015-02-20 18:00:26 +0100 | [diff] [blame] | 916 | sblocks_for_recheck = kcalloc(BTRFS_MAX_MIRRORS, |
Filipe Manana | 7c3c7cb | 2018-12-07 13:23:32 +0000 | [diff] [blame] | 917 | sizeof(*sblocks_for_recheck), GFP_KERNEL); |
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 918 | if (!sblocks_for_recheck) { |
Stefan Behrens | d9d181c | 2012-11-02 09:58:09 +0100 | [diff] [blame] | 919 | spin_lock(&sctx->stat_lock); |
| 920 | sctx->stat.malloc_errors++; |
| 921 | sctx->stat.read_errors++; |
| 922 | sctx->stat.uncorrectable_errors++; |
| 923 | spin_unlock(&sctx->stat_lock); |
Stefan Behrens | a36cf8b | 2012-11-02 13:26:57 +0100 | [diff] [blame] | 924 | btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS); |
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 925 | goto out; |
| 926 | } |
| 927 | |
| 928 | 	/* Set up the context, map the logical blocks and alloc the pages */
Zhao Lei | be50a8d | 2015-01-20 15:11:42 +0800 | [diff] [blame] | 929 | ret = scrub_setup_recheck_block(sblock_to_check, sblocks_for_recheck); |
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 930 | if (ret) { |
Stefan Behrens | d9d181c | 2012-11-02 09:58:09 +0100 | [diff] [blame] | 931 | spin_lock(&sctx->stat_lock); |
| 932 | sctx->stat.read_errors++; |
| 933 | sctx->stat.uncorrectable_errors++; |
| 934 | spin_unlock(&sctx->stat_lock); |
Stefan Behrens | a36cf8b | 2012-11-02 13:26:57 +0100 | [diff] [blame] | 935 | btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS); |
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 936 | goto out; |
| 937 | } |
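| | 	/* The recheck copy at the failed mirror's index is the block to repair. */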
| 938 | BUG_ON(failed_mirror_index >= BTRFS_MAX_MIRRORS); |
| 939 | sblock_bad = sblocks_for_recheck + failed_mirror_index; |
| 940 | |
| 941 | /* build and submit the bios for the failed mirror, check checksums */ |
Zhao Lei | affe4a5 | 2015-08-24 21:32:06 +0800 | [diff] [blame] | 942 | scrub_recheck_block(fs_info, sblock_bad, 1); |
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 943 | |
| 944 | if (!sblock_bad->header_error && !sblock_bad->checksum_error && |
| 945 | sblock_bad->no_io_error_seen) { |
| 946 | /* |
| 947 | * the error disappeared after reading page by page, or |
| 948 | * the area was part of a huge bio and other parts of the |
| 949 | * bio caused I/O errors, or the block layer merged several |
| 950 | * read requests into one and the error is caused by a |
| 951 | 		 * different bio (usually one of the latter two cases is
| 952 | * the cause) |
| 953 | */ |
Stefan Behrens | d9d181c | 2012-11-02 09:58:09 +0100 | [diff] [blame] | 954 | spin_lock(&sctx->stat_lock); |
| 955 | sctx->stat.unverified_errors++; |
Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 956 | sblock_to_check->data_corrected = 1; |
Stefan Behrens | d9d181c | 2012-11-02 09:58:09 +0100 | [diff] [blame] | 957 | spin_unlock(&sctx->stat_lock); |
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 958 | |
Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 959 | if (sctx->is_dev_replace) |
| 960 | scrub_write_block_to_dev_replace(sblock_bad); |
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 961 | goto out; |
| 962 | } |
| 963 | |
| 964 | if (!sblock_bad->no_io_error_seen) { |
Stefan Behrens | d9d181c | 2012-11-02 09:58:09 +0100 | [diff] [blame] | 965 | spin_lock(&sctx->stat_lock); |
| 966 | sctx->stat.read_errors++; |
| 967 | spin_unlock(&sctx->stat_lock); |
David Sterba | 8bb1cf1 | 2020-08-17 12:12:38 +0200 | [diff] [blame] | 968 | if (__ratelimit(&rs)) |
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 969 | scrub_print_warning("i/o error", sblock_to_check); |
Stefan Behrens | a36cf8b | 2012-11-02 13:26:57 +0100 | [diff] [blame] | 970 | btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS); |
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 971 | } else if (sblock_bad->checksum_error) { |
Stefan Behrens | d9d181c | 2012-11-02 09:58:09 +0100 | [diff] [blame] | 972 | spin_lock(&sctx->stat_lock); |
| 973 | sctx->stat.csum_errors++; |
| 974 | spin_unlock(&sctx->stat_lock); |
David Sterba | 8bb1cf1 | 2020-08-17 12:12:38 +0200 | [diff] [blame] | 975 | if (__ratelimit(&rs)) |
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 976 | scrub_print_warning("checksum error", sblock_to_check); |
Stefan Behrens | a36cf8b | 2012-11-02 13:26:57 +0100 | [diff] [blame] | 977 | btrfs_dev_stat_inc_and_print(dev, |
Stefan Behrens | 442a4f6 | 2012-05-25 16:06:08 +0200 | [diff] [blame] | 978 | BTRFS_DEV_STAT_CORRUPTION_ERRS); |
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 979 | } else if (sblock_bad->header_error) { |
Stefan Behrens | d9d181c | 2012-11-02 09:58:09 +0100 | [diff] [blame] | 980 | spin_lock(&sctx->stat_lock); |
| 981 | sctx->stat.verify_errors++; |
| 982 | spin_unlock(&sctx->stat_lock); |
David Sterba | 8bb1cf1 | 2020-08-17 12:12:38 +0200 | [diff] [blame] | 983 | if (__ratelimit(&rs)) |
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 984 | scrub_print_warning("checksum/header error", |
| 985 | sblock_to_check); |
Stefan Behrens | 442a4f6 | 2012-05-25 16:06:08 +0200 | [diff] [blame] | 986 | if (sblock_bad->generation_error) |
Stefan Behrens | a36cf8b | 2012-11-02 13:26:57 +0100 | [diff] [blame] | 987 | btrfs_dev_stat_inc_and_print(dev, |
Stefan Behrens | 442a4f6 | 2012-05-25 16:06:08 +0200 | [diff] [blame] | 988 | BTRFS_DEV_STAT_GENERATION_ERRS); |
| 989 | else |
Stefan Behrens | a36cf8b | 2012-11-02 13:26:57 +0100 | [diff] [blame] | 990 | btrfs_dev_stat_inc_and_print(dev, |
Stefan Behrens | 442a4f6 | 2012-05-25 16:06:08 +0200 | [diff] [blame] | 991 | BTRFS_DEV_STAT_CORRUPTION_ERRS); |
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 992 | } |
| 993 | |
Ilya Dryomov | 33ef30a | 2013-11-03 19:06:38 +0200 | [diff] [blame] | 994 | if (sctx->readonly) { |
| 995 | ASSERT(!sctx->is_dev_replace); |
| 996 | goto out; |
| 997 | } |
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 998 | |
Qu Wenruo | 665d495 | 2018-07-11 13:41:21 +0800 | [diff] [blame] | 999 | /* |
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1000 | * now build and submit the bios for the other mirrors, check |
Stefan Behrens | cb2ced7 | 2012-11-02 16:14:21 +0100 | [diff] [blame] | 1001 | * checksums. |
| 1002 | * First try to pick the mirror which is completely without I/O |
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1003 | * errors and also does not have a checksum error. |
| 1004 | * If one is found, and if a checksum is present, the full block |
| 1005 | * that is known to contain an error is rewritten. Afterwards |
| 1006 | * the block is known to be corrected. |
| 1007 | * If a mirror is found which is completely correct, and no |
| 1008 | * checksum is present, only those pages are rewritten that had |
| 1009 | * an I/O error in the block to be repaired, since it cannot be |
| 1010 | 	 * determined which copy of the other pages is better (and it
| 1011 | * could happen otherwise that a correct page would be |
| 1012 | * overwritten by a bad one). |
| 1013 | */ |
Liu Bo | 762221f | 2018-01-02 13:36:42 -0700 | [diff] [blame] | 1014 | for (mirror_index = 0; ;mirror_index++) { |
Stefan Behrens | cb2ced7 | 2012-11-02 16:14:21 +0100 | [diff] [blame] | 1015 | struct scrub_block *sblock_other; |
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1016 | |
Stefan Behrens | cb2ced7 | 2012-11-02 16:14:21 +0100 | [diff] [blame] | 1017 | if (mirror_index == failed_mirror_index) |
| 1018 | continue; |
Liu Bo | 762221f | 2018-01-02 13:36:42 -0700 | [diff] [blame] | 1019 | |
| 1020 | 		/* raid56's mirror count can be more than BTRFS_MAX_MIRRORS */
| 1021 | if (!scrub_is_page_on_raid56(sblock_bad->pagev[0])) { |
| 1022 | if (mirror_index >= BTRFS_MAX_MIRRORS) |
| 1023 | break; |
| 1024 | if (!sblocks_for_recheck[mirror_index].page_count) |
| 1025 | break; |
| 1026 | |
| 1027 | sblock_other = sblocks_for_recheck + mirror_index; |
| 1028 | } else { |
| 1029 | struct scrub_recover *r = sblock_bad->pagev[0]->recover; |
Qu Wenruo | 4c66461 | 2021-09-15 15:17:16 +0800 | [diff] [blame] | 1030 | int max_allowed = r->bioc->num_stripes - r->bioc->num_tgtdevs; |
Liu Bo | 762221f | 2018-01-02 13:36:42 -0700 | [diff] [blame] | 1031 | |
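| | 			/*
| | 			 * Stripes on dev-replace target devices duplicate the
| | 			 * source and provide no extra copies to read from, so
| | 			 * they are excluded from max_allowed.
| | 			 */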
| 1032 | if (mirror_index >= max_allowed) |
| 1033 | break; |
| 1034 | if (!sblocks_for_recheck[1].page_count) |
| 1035 | break; |
| 1036 | |
| 1037 | ASSERT(failed_mirror_index == 0); |
| 1038 | sblock_other = sblocks_for_recheck + 1; |
| 1039 | sblock_other->pagev[0]->mirror_num = 1 + mirror_index; |
| 1040 | } |
Stefan Behrens | cb2ced7 | 2012-11-02 16:14:21 +0100 | [diff] [blame] | 1041 | |
| 1042 | /* build and submit the bios, check checksums */ |
Zhao Lei | affe4a5 | 2015-08-24 21:32:06 +0800 | [diff] [blame] | 1043 | scrub_recheck_block(fs_info, sblock_other, 0); |
Stefan Behrens | 34f5c8e | 2012-11-02 16:16:26 +0100 | [diff] [blame] | 1044 | |
| 1045 | if (!sblock_other->header_error && |
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1046 | !sblock_other->checksum_error && |
| 1047 | sblock_other->no_io_error_seen) { |
Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 1048 | if (sctx->is_dev_replace) { |
| 1049 | scrub_write_block_to_dev_replace(sblock_other); |
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1050 | goto corrected_error; |
Zhao Lei | 114ab50 | 2015-01-20 15:11:36 +0800 | [diff] [blame] | 1051 | } else { |
| 1052 | ret = scrub_repair_block_from_good_copy( |
| 1053 | sblock_bad, sblock_other); |
| 1054 | if (!ret) |
| 1055 | goto corrected_error; |
| 1056 | } |
Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 1057 | } |
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1058 | } |
| 1059 | |
Zhao Lei | b968fed | 2015-01-20 15:11:41 +0800 | [diff] [blame] | 1060 | if (sblock_bad->no_io_error_seen && !sctx->is_dev_replace) |
| 1061 | goto did_not_correct_error; |
Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 1062 | |
| 1063 | /* |
Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 1064 | * In case of I/O errors in the area that is supposed to be |
Qu Wenruo | 8df507c | 2021-04-22 19:02:46 +0800 | [diff] [blame] | 1065 | * repaired, continue by picking good copies of those sectors. |
| 1066 | * Select the good sectors from mirrors to rewrite bad sectors from |
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1067 | * the area to fix. Afterwards verify the checksum of the block |
| 1068 | * that is supposed to be repaired. This verification step is |
| 1069 | 	 * only done for the purpose of statistics counting and for the
| 1070 | 	 * final scrub report on whether errors remain.
| 1071 | * A perfect algorithm could make use of the checksum and try |
Qu Wenruo | 8df507c | 2021-04-22 19:02:46 +0800 | [diff] [blame] | 1072 | * all possible combinations of sectors from the different mirrors |
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1073 | * until the checksum verification succeeds. For example, when |
Qu Wenruo | 8df507c | 2021-04-22 19:02:46 +0800 | [diff] [blame] | 1074 | * the 2nd sector of mirror #1 faces I/O errors, and the 2nd sector |
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1075 | * of mirror #2 is readable but the final checksum test fails, |
Qu Wenruo | 8df507c | 2021-04-22 19:02:46 +0800 | [diff] [blame] | 1076 | 	 * then the 2nd sector of mirror #3 could be tried, to see whether
Nicholas D Steeves | 0132761 | 2016-05-19 21:18:45 -0400 | [diff] [blame] | 1077 | 	 * the final checksum then succeeds. But this would be a rare
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1078 | 	 * exception and is therefore not implemented. At least
| 1079 | 	 * overwriting the good copy is avoided.
| 1080 | * A more useful improvement would be to pick the sectors |
| 1081 | * without I/O error based on sector sizes (512 bytes on legacy |
Qu Wenruo | 8df507c | 2021-04-22 19:02:46 +0800 | [diff] [blame] | 1082 | 	 * disks) instead of on sectorsize. Then maybe 512 bytes of one
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1083 | 	 * mirror could be repaired by taking 512 bytes of a different
Qu Wenruo | 8df507c | 2021-04-22 19:02:46 +0800 | [diff] [blame] | 1084 | * mirror, even if other 512 byte sectors in the same sectorsize |
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1085 | * area are unreadable. |
| 1086 | */ |
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1087 | success = 1; |
Zhao Lei | b968fed | 2015-01-20 15:11:41 +0800 | [diff] [blame] | 1088 | for (page_num = 0; page_num < sblock_bad->page_count; |
| 1089 | page_num++) { |
Qu Wenruo | 261d2dc | 2020-11-03 21:31:01 +0800 | [diff] [blame] | 1090 | struct scrub_page *spage_bad = sblock_bad->pagev[page_num]; |
Zhao Lei | b968fed | 2015-01-20 15:11:41 +0800 | [diff] [blame] | 1091 | struct scrub_block *sblock_other = NULL; |
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1092 | |
Zhao Lei | b968fed | 2015-01-20 15:11:41 +0800 | [diff] [blame] | 1093 | 		/* Skip pages without I/O errors in plain scrub (non-replace) mode */
Qu Wenruo | 261d2dc | 2020-11-03 21:31:01 +0800 | [diff] [blame] | 1094 | if (!spage_bad->io_error && !sctx->is_dev_replace) |
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1095 | continue; |
| 1096 | |
Liu Bo | 4759700 | 2018-03-02 16:10:41 -0700 | [diff] [blame] | 1097 | if (scrub_is_page_on_raid56(sblock_bad->pagev[0])) { |
| 1098 | /* |
| 1099 | 			 * In case of dev replace, if the raid56 rebuild process
| 1100 | 			 * didn't produce correct data, copy the content of
| 1101 | 			 * sblock_bad to make sure the target device is identical
| 1102 | 			 * to the source device, instead of writing garbage data
| 1103 | 			 * from the sblock_for_recheck array to the target device.
| 1104 | */ |
| 1105 | sblock_other = NULL; |
Qu Wenruo | 261d2dc | 2020-11-03 21:31:01 +0800 | [diff] [blame] | 1106 | } else if (spage_bad->io_error) { |
Liu Bo | 4759700 | 2018-03-02 16:10:41 -0700 | [diff] [blame] | 1107 | /* try to find no-io-error page in mirrors */ |
Zhao Lei | b968fed | 2015-01-20 15:11:41 +0800 | [diff] [blame] | 1108 | for (mirror_index = 0; |
| 1109 | mirror_index < BTRFS_MAX_MIRRORS && |
| 1110 | sblocks_for_recheck[mirror_index].page_count > 0; |
| 1111 | mirror_index++) { |
| 1112 | if (!sblocks_for_recheck[mirror_index]. |
| 1113 | pagev[page_num]->io_error) { |
| 1114 | sblock_other = sblocks_for_recheck + |
| 1115 | mirror_index; |
| 1116 | break; |
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1117 | } |
Jan Schmidt | 13db62b | 2011-06-13 19:56:13 +0200 | [diff] [blame] | 1118 | } |
Zhao Lei | b968fed | 2015-01-20 15:11:41 +0800 | [diff] [blame] | 1119 | if (!sblock_other) |
| 1120 | success = 0; |
Jan Schmidt | 13db62b | 2011-06-13 19:56:13 +0200 | [diff] [blame] | 1121 | } |
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1122 | |
Zhao Lei | b968fed | 2015-01-20 15:11:41 +0800 | [diff] [blame] | 1123 | if (sctx->is_dev_replace) { |
| 1124 | /* |
| 1125 | 			 * Did not find a mirror to fetch the page
| 1126 | 			 * from. scrub_write_page_to_dev_replace()
| 1127 | 			 * handles this case (page->io_error) by
| 1128 | * filling the block with zeros before |
| 1129 | * submitting the write request |
| 1130 | */ |
| 1131 | if (!sblock_other) |
| 1132 | sblock_other = sblock_bad; |
| 1133 | |
| 1134 | if (scrub_write_page_to_dev_replace(sblock_other, |
| 1135 | page_num) != 0) { |
David Sterba | e37abe9 | 2018-04-04 17:20:52 +0200 | [diff] [blame] | 1136 | atomic64_inc( |
Jeff Mahoney | 0b246af | 2016-06-22 18:54:23 -0400 | [diff] [blame] | 1137 | &fs_info->dev_replace.num_write_errors); |
Zhao Lei | b968fed | 2015-01-20 15:11:41 +0800 | [diff] [blame] | 1138 | success = 0; |
| 1139 | } |
| 1140 | } else if (sblock_other) { |
| 1141 | ret = scrub_repair_page_from_good_copy(sblock_bad, |
| 1142 | sblock_other, |
| 1143 | page_num, 0); |
| 1144 | 			if (!ret)
Qu Wenruo | 261d2dc | 2020-11-03 21:31:01 +0800 | [diff] [blame] | 1145 | spage_bad->io_error = 0; |
Zhao Lei | b968fed | 2015-01-20 15:11:41 +0800 | [diff] [blame] | 1146 | else |
| 1147 | success = 0; |
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1148 | } |
| 1149 | } |
| 1150 | |
Zhao Lei | b968fed | 2015-01-20 15:11:41 +0800 | [diff] [blame] | 1151 | if (success && !sctx->is_dev_replace) { |
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1152 | if (is_metadata || have_csum) { |
| 1153 | /* |
| 1154 | * need to verify the checksum now that all |
| 1155 | * sectors on disk are repaired (the write |
| 1156 | * request for data to be repaired is on its way). |
| 1157 | * Just be lazy and use scrub_recheck_block() |
| 1158 | * which re-reads the data before the checksum |
| 1159 | * is verified, but most likely the data comes out |
| 1160 | * of the page cache. |
| 1161 | */ |
Zhao Lei | affe4a5 | 2015-08-24 21:32:06 +0800 | [diff] [blame] | 1162 | scrub_recheck_block(fs_info, sblock_bad, 1); |
Stefan Behrens | 34f5c8e | 2012-11-02 16:16:26 +0100 | [diff] [blame] | 1163 | if (!sblock_bad->header_error && |
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1164 | !sblock_bad->checksum_error && |
| 1165 | sblock_bad->no_io_error_seen) |
| 1166 | goto corrected_error; |
| 1167 | else |
| 1168 | goto did_not_correct_error; |
| 1169 | } else { |
| 1170 | corrected_error: |
Stefan Behrens | d9d181c | 2012-11-02 09:58:09 +0100 | [diff] [blame] | 1171 | spin_lock(&sctx->stat_lock); |
| 1172 | sctx->stat.corrected_errors++; |
Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 1173 | sblock_to_check->data_corrected = 1; |
Stefan Behrens | d9d181c | 2012-11-02 09:58:09 +0100 | [diff] [blame] | 1174 | spin_unlock(&sctx->stat_lock); |
David Sterba | b14af3b | 2015-10-08 10:43:10 +0200 | [diff] [blame] | 1175 | btrfs_err_rl_in_rcu(fs_info, |
| 1176 | "fixed up error at logical %llu on dev %s", |
Geert Uytterhoeven | c1c9ff7 | 2013-08-20 13:20:07 +0200 | [diff] [blame] | 1177 | logical, rcu_str_deref(dev->name)); |
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1178 | } |
| 1179 | } else { |
| 1180 | did_not_correct_error: |
Stefan Behrens | d9d181c | 2012-11-02 09:58:09 +0100 | [diff] [blame] | 1181 | spin_lock(&sctx->stat_lock); |
| 1182 | sctx->stat.uncorrectable_errors++; |
| 1183 | spin_unlock(&sctx->stat_lock); |
David Sterba | b14af3b | 2015-10-08 10:43:10 +0200 | [diff] [blame] | 1184 | btrfs_err_rl_in_rcu(fs_info, |
| 1185 | "unable to fixup (regular) error at logical %llu on dev %s", |
Geert Uytterhoeven | c1c9ff7 | 2013-08-20 13:20:07 +0200 | [diff] [blame] | 1186 | logical, rcu_str_deref(dev->name)); |
Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 1187 | } |
| 1188 | |
| 1189 | out: |
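| | 	/* Drop the page and recover references taken for the recheck copies. */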
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1190 | if (sblocks_for_recheck) { |
| 1191 | for (mirror_index = 0; mirror_index < BTRFS_MAX_MIRRORS; |
| 1192 | mirror_index++) { |
| 1193 | struct scrub_block *sblock = sblocks_for_recheck + |
| 1194 | mirror_index; |
Miao Xie | af8e2d1 | 2014-10-23 14:42:50 +0800 | [diff] [blame] | 1195 | struct scrub_recover *recover; |
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1196 | int page_index; |
| 1197 | |
Stefan Behrens | 7a9e998 | 2012-11-02 14:58:04 +0100 | [diff] [blame] | 1198 | for (page_index = 0; page_index < sblock->page_count; |
| 1199 | page_index++) { |
| 1200 | sblock->pagev[page_index]->sblock = NULL; |
Miao Xie | af8e2d1 | 2014-10-23 14:42:50 +0800 | [diff] [blame] | 1201 | recover = sblock->pagev[page_index]->recover; |
| 1202 | if (recover) { |
Qu Wenruo | e501bfe | 2017-03-29 09:33:22 +0800 | [diff] [blame] | 1203 | scrub_put_recover(fs_info, recover); |
Miao Xie | af8e2d1 | 2014-10-23 14:42:50 +0800 | [diff] [blame] | 1204 | sblock->pagev[page_index]->recover = |
| 1205 | NULL; |
| 1206 | } |
Stefan Behrens | 7a9e998 | 2012-11-02 14:58:04 +0100 | [diff] [blame] | 1207 | scrub_page_put(sblock->pagev[page_index]); |
| 1208 | } |
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1209 | } |
| 1210 | kfree(sblocks_for_recheck); |
| 1211 | } |
| 1212 | |
Qu Wenruo | 28d70e2 | 2017-04-14 08:35:55 +0800 | [diff] [blame] | 1213 | ret = unlock_full_stripe(fs_info, logical, full_stripe_locked); |
Filipe Manana | 7c3c7cb | 2018-12-07 13:23:32 +0000 | [diff] [blame] | 1214 | memalloc_nofs_restore(nofs_flag); |
Qu Wenruo | 28d70e2 | 2017-04-14 08:35:55 +0800 | [diff] [blame] | 1215 | if (ret < 0) |
| 1216 | return ret; |
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1217 | return 0; |
Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 1218 | } |
| 1219 | |
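| | /*
| | * How many distinct ways a stripe can be read: RAID5 data can be read
| | * directly or rebuilt from parity (2), RAID6 additionally from the Q
| | * stripe (3); for the other profiles each stripe is one copy.
| | */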
Qu Wenruo | 4c66461 | 2021-09-15 15:17:16 +0800 | [diff] [blame] | 1220 | static inline int scrub_nr_raid_mirrors(struct btrfs_io_context *bioc) |
Miao Xie | af8e2d1 | 2014-10-23 14:42:50 +0800 | [diff] [blame] | 1221 | { |
Qu Wenruo | 4c66461 | 2021-09-15 15:17:16 +0800 | [diff] [blame] | 1222 | if (bioc->map_type & BTRFS_BLOCK_GROUP_RAID5) |
Zhao Lei | 10f1190 | 2015-01-20 15:11:43 +0800 | [diff] [blame] | 1223 | return 2; |
Qu Wenruo | 4c66461 | 2021-09-15 15:17:16 +0800 | [diff] [blame] | 1224 | else if (bioc->map_type & BTRFS_BLOCK_GROUP_RAID6) |
Zhao Lei | 10f1190 | 2015-01-20 15:11:43 +0800 | [diff] [blame] | 1225 | return 3; |
| 1226 | else |
Qu Wenruo | 4c66461 | 2021-09-15 15:17:16 +0800 | [diff] [blame] | 1227 | return (int)bioc->num_stripes; |
Miao Xie | af8e2d1 | 2014-10-23 14:42:50 +0800 | [diff] [blame] | 1228 | } |
| 1229 | |
Zhao Lei | 10f1190 | 2015-01-20 15:11:43 +0800 | [diff] [blame] | 1230 | static inline void scrub_stripe_index_and_offset(u64 logical, u64 map_type, |
| 1231 | u64 *raid_map, |
Miao Xie | af8e2d1 | 2014-10-23 14:42:50 +0800 | [diff] [blame] | 1232 | u64 mapped_length, |
| 1233 | int nstripes, int mirror, |
| 1234 | int *stripe_index, |
| 1235 | u64 *stripe_offset) |
| 1236 | { |
| 1237 | int i; |
| 1238 | |
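| | 	/*
| | 	 * For RAID5/6, raid_map[] holds the logical start of each stripe;
| | 	 * find the data stripe whose range covers @logical, skipping the
| | 	 * P/Q parity stripes.
| | 	 */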
Zhao Lei | ffe2d20 | 2015-01-20 15:11:44 +0800 | [diff] [blame] | 1239 | if (map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) { |
Miao Xie | af8e2d1 | 2014-10-23 14:42:50 +0800 | [diff] [blame] | 1240 | /* RAID5/6 */ |
| 1241 | for (i = 0; i < nstripes; i++) { |
| 1242 | if (raid_map[i] == RAID6_Q_STRIPE || |
| 1243 | raid_map[i] == RAID5_P_STRIPE) |
| 1244 | continue; |
| 1245 | |
| 1246 | if (logical >= raid_map[i] && |
| 1247 | logical < raid_map[i] + mapped_length) |
| 1248 | break; |
| 1249 | } |
| 1250 | |
| 1251 | *stripe_index = i; |
| 1252 | *stripe_offset = logical - raid_map[i]; |
| 1253 | } else { |
| 1254 | /* The other RAID type */ |
| 1255 | *stripe_index = mirror; |
| 1256 | *stripe_offset = 0; |
| 1257 | } |
| 1258 | } |
| 1259 | |
Zhao Lei | be50a8d | 2015-01-20 15:11:42 +0800 | [diff] [blame] | 1260 | static int scrub_setup_recheck_block(struct scrub_block *original_sblock, |
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1261 | struct scrub_block *sblocks_for_recheck) |
Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 1262 | { |
Zhao Lei | be50a8d | 2015-01-20 15:11:42 +0800 | [diff] [blame] | 1263 | struct scrub_ctx *sctx = original_sblock->sctx; |
Jeff Mahoney | fb45625 | 2016-06-22 18:54:56 -0400 | [diff] [blame] | 1264 | struct btrfs_fs_info *fs_info = sctx->fs_info; |
Qu Wenruo | 8df507c | 2021-04-22 19:02:46 +0800 | [diff] [blame] | 1265 | u64 length = original_sblock->page_count * fs_info->sectorsize; |
Zhao Lei | be50a8d | 2015-01-20 15:11:42 +0800 | [diff] [blame] | 1266 | u64 logical = original_sblock->pagev[0]->logical; |
Zhao Lei | 4734b7e | 2015-08-19 22:39:18 +0800 | [diff] [blame] | 1267 | u64 generation = original_sblock->pagev[0]->generation; |
| 1268 | u64 flags = original_sblock->pagev[0]->flags; |
| 1269 | u64 have_csum = original_sblock->pagev[0]->have_csum; |
Miao Xie | af8e2d1 | 2014-10-23 14:42:50 +0800 | [diff] [blame] | 1270 | struct scrub_recover *recover; |
Qu Wenruo | 4c66461 | 2021-09-15 15:17:16 +0800 | [diff] [blame] | 1271 | struct btrfs_io_context *bioc; |
Miao Xie | af8e2d1 | 2014-10-23 14:42:50 +0800 | [diff] [blame] | 1272 | u64 sublen; |
| 1273 | u64 mapped_length; |
| 1274 | u64 stripe_offset; |
| 1275 | int stripe_index; |
Zhao Lei | be50a8d | 2015-01-20 15:11:42 +0800 | [diff] [blame] | 1276 | int page_index = 0; |
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1277 | int mirror_index; |
Miao Xie | af8e2d1 | 2014-10-23 14:42:50 +0800 | [diff] [blame] | 1278 | int nmirrors; |
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1279 | int ret; |
| 1280 | |
| 1281 | /* |
Zhao Lei | 5701934 | 2015-01-20 15:11:45 +0800 | [diff] [blame] | 1282 | * note: the two members refs and outstanding_pages |
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1283 | * are not used (and not set) in the blocks that are used for |
| 1284 | * the recheck procedure |
| 1285 | */ |
| 1286 | |
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1287 | while (length > 0) { |
Qu Wenruo | 8df507c | 2021-04-22 19:02:46 +0800 | [diff] [blame] | 1288 | sublen = min_t(u64, length, fs_info->sectorsize); |
Miao Xie | af8e2d1 | 2014-10-23 14:42:50 +0800 | [diff] [blame] | 1289 | mapped_length = sublen; |
Qu Wenruo | 4c66461 | 2021-09-15 15:17:16 +0800 | [diff] [blame] | 1290 | bioc = NULL; |
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1291 | |
| 1292 | /* |
Qu Wenruo | 8df507c | 2021-04-22 19:02:46 +0800 | [diff] [blame] | 1293 | * With a length of sectorsize, each returned stripe represents |
| 1294 | * one mirror |
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1295 | */ |
Qu Wenruo | e501bfe | 2017-03-29 09:33:22 +0800 | [diff] [blame] | 1296 | btrfs_bio_counter_inc_blocked(fs_info); |
Christoph Hellwig | cf8cddd | 2016-10-27 09:27:36 +0200 | [diff] [blame] | 1297 | ret = btrfs_map_sblock(fs_info, BTRFS_MAP_GET_READ_MIRRORS, |
Qu Wenruo | 4c66461 | 2021-09-15 15:17:16 +0800 | [diff] [blame] | 1298 | logical, &mapped_length, &bioc); |
| 1299 | if (ret || !bioc || mapped_length < sublen) { |
| 1300 | btrfs_put_bioc(bioc); |
Qu Wenruo | e501bfe | 2017-03-29 09:33:22 +0800 | [diff] [blame] | 1301 | btrfs_bio_counter_dec(fs_info); |
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1302 | return -EIO; |
| 1303 | } |
| 1304 | |
Miao Xie | af8e2d1 | 2014-10-23 14:42:50 +0800 | [diff] [blame] | 1305 | recover = kzalloc(sizeof(struct scrub_recover), GFP_NOFS); |
| 1306 | if (!recover) { |
Qu Wenruo | 4c66461 | 2021-09-15 15:17:16 +0800 | [diff] [blame] | 1307 | btrfs_put_bioc(bioc); |
Qu Wenruo | e501bfe | 2017-03-29 09:33:22 +0800 | [diff] [blame] | 1308 | btrfs_bio_counter_dec(fs_info); |
Miao Xie | af8e2d1 | 2014-10-23 14:42:50 +0800 | [diff] [blame] | 1309 | return -ENOMEM; |
| 1310 | } |
| 1311 | |
Elena Reshetova | 6f61501 | 2017-03-03 10:55:21 +0200 | [diff] [blame] | 1312 | refcount_set(&recover->refs, 1); |
Qu Wenruo | 4c66461 | 2021-09-15 15:17:16 +0800 | [diff] [blame] | 1313 | recover->bioc = bioc; |
Miao Xie | af8e2d1 | 2014-10-23 14:42:50 +0800 | [diff] [blame] | 1314 | recover->map_length = mapped_length; |
| 1315 | |
Ashish Samant | 2473114 | 2016-04-29 18:33:59 -0700 | [diff] [blame] | 1316 | BUG_ON(page_index >= SCRUB_MAX_PAGES_PER_BLOCK); |
Miao Xie | af8e2d1 | 2014-10-23 14:42:50 +0800 | [diff] [blame] | 1317 | |
Qu Wenruo | 4c66461 | 2021-09-15 15:17:16 +0800 | [diff] [blame] | 1318 | nmirrors = min(scrub_nr_raid_mirrors(bioc), BTRFS_MAX_MIRRORS); |
Zhao Lei | 10f1190 | 2015-01-20 15:11:43 +0800 | [diff] [blame] | 1319 | |
Miao Xie | af8e2d1 | 2014-10-23 14:42:50 +0800 | [diff] [blame] | 1320 | for (mirror_index = 0; mirror_index < nmirrors; |
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1321 | mirror_index++) { |
| 1322 | struct scrub_block *sblock; |
Qu Wenruo | 261d2dc | 2020-11-03 21:31:01 +0800 | [diff] [blame] | 1323 | struct scrub_page *spage; |
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1324 | |
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1325 | sblock = sblocks_for_recheck + mirror_index; |
Stefan Behrens | 7a9e998 | 2012-11-02 14:58:04 +0100 | [diff] [blame] | 1326 | sblock->sctx = sctx; |
Zhao Lei | 4734b7e | 2015-08-19 22:39:18 +0800 | [diff] [blame] | 1327 | |
Qu Wenruo | 261d2dc | 2020-11-03 21:31:01 +0800 | [diff] [blame] | 1328 | spage = kzalloc(sizeof(*spage), GFP_NOFS); |
| 1329 | if (!spage) { |
Stefan Behrens | 7a9e998 | 2012-11-02 14:58:04 +0100 | [diff] [blame] | 1330 | leave_nomem: |
Stefan Behrens | d9d181c | 2012-11-02 09:58:09 +0100 | [diff] [blame] | 1331 | spin_lock(&sctx->stat_lock); |
| 1332 | sctx->stat.malloc_errors++; |
| 1333 | spin_unlock(&sctx->stat_lock); |
Qu Wenruo | e501bfe | 2017-03-29 09:33:22 +0800 | [diff] [blame] | 1334 | scrub_put_recover(fs_info, recover); |
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1335 | return -ENOMEM; |
| 1336 | } |
Qu Wenruo | 261d2dc | 2020-11-03 21:31:01 +0800 | [diff] [blame] | 1337 | scrub_page_get(spage); |
| 1338 | sblock->pagev[page_index] = spage; |
| 1339 | spage->sblock = sblock; |
| 1340 | spage->flags = flags; |
| 1341 | spage->generation = generation; |
| 1342 | spage->logical = logical; |
| 1343 | spage->have_csum = have_csum; |
Zhao Lei | 4734b7e | 2015-08-19 22:39:18 +0800 | [diff] [blame] | 1344 | if (have_csum) |
Qu Wenruo | 261d2dc | 2020-11-03 21:31:01 +0800 | [diff] [blame] | 1345 | memcpy(spage->csum, |
Zhao Lei | 4734b7e | 2015-08-19 22:39:18 +0800 | [diff] [blame] | 1346 | original_sblock->pagev[0]->csum, |
David Sterba | 2ae0c2d | 2020-06-30 17:44:49 +0200 | [diff] [blame] | 1347 | sctx->fs_info->csum_size); |
Miao Xie | af8e2d1 | 2014-10-23 14:42:50 +0800 | [diff] [blame] | 1348 | |
Zhao Lei | 10f1190 | 2015-01-20 15:11:43 +0800 | [diff] [blame] | 1349 | scrub_stripe_index_and_offset(logical, |
Qu Wenruo | 4c66461 | 2021-09-15 15:17:16 +0800 | [diff] [blame] | 1350 | bioc->map_type, |
| 1351 | bioc->raid_map, |
Miao Xie | af8e2d1 | 2014-10-23 14:42:50 +0800 | [diff] [blame] | 1352 | mapped_length, |
Qu Wenruo | 4c66461 | 2021-09-15 15:17:16 +0800 | [diff] [blame] | 1353 | bioc->num_stripes - |
| 1354 | bioc->num_tgtdevs, |
Miao Xie | af8e2d1 | 2014-10-23 14:42:50 +0800 | [diff] [blame] | 1355 | mirror_index, |
| 1356 | &stripe_index, |
| 1357 | &stripe_offset); |
Qu Wenruo | 4c66461 | 2021-09-15 15:17:16 +0800 | [diff] [blame] | 1358 | spage->physical = bioc->stripes[stripe_index].physical + |
Miao Xie | af8e2d1 | 2014-10-23 14:42:50 +0800 | [diff] [blame] | 1359 | stripe_offset; |
Qu Wenruo | 4c66461 | 2021-09-15 15:17:16 +0800 | [diff] [blame] | 1360 | spage->dev = bioc->stripes[stripe_index].dev; |
Miao Xie | af8e2d1 | 2014-10-23 14:42:50 +0800 | [diff] [blame] | 1361 | |
Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 1362 | BUG_ON(page_index >= original_sblock->page_count); |
Qu Wenruo | 261d2dc | 2020-11-03 21:31:01 +0800 | [diff] [blame] | 1363 | spage->physical_for_dev_replace = |
Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 1364 | original_sblock->pagev[page_index]-> |
| 1365 | physical_for_dev_replace; |
Stefan Behrens | 7a9e998 | 2012-11-02 14:58:04 +0100 | [diff] [blame] | 1366 | /* for missing devices, dev->bdev is NULL */ |
Qu Wenruo | 261d2dc | 2020-11-03 21:31:01 +0800 | [diff] [blame] | 1367 | spage->mirror_num = mirror_index + 1; |
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1368 | sblock->page_count++; |
Qu Wenruo | 261d2dc | 2020-11-03 21:31:01 +0800 | [diff] [blame] | 1369 | spage->page = alloc_page(GFP_NOFS); |
| 1370 | if (!spage->page) |
Stefan Behrens | 7a9e998 | 2012-11-02 14:58:04 +0100 | [diff] [blame] | 1371 | goto leave_nomem; |
Miao Xie | af8e2d1 | 2014-10-23 14:42:50 +0800 | [diff] [blame] | 1372 | |
| 1373 | scrub_get_recover(recover); |
Qu Wenruo | 261d2dc | 2020-11-03 21:31:01 +0800 | [diff] [blame] | 1374 | spage->recover = recover; |
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1375 | } |
Qu Wenruo | e501bfe | 2017-03-29 09:33:22 +0800 | [diff] [blame] | 1376 | scrub_put_recover(fs_info, recover); |
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1377 | length -= sublen; |
| 1378 | logical += sublen; |
| 1379 | page_index++; |
| 1380 | } |
| 1381 | |
| 1382 | return 0; |
| 1383 | } |
| 1384 | |
Christoph Hellwig | 4246a0b | 2015-07-20 15:29:37 +0200 | [diff] [blame] | 1385 | static void scrub_bio_wait_endio(struct bio *bio) |
Miao Xie | af8e2d1 | 2014-10-23 14:42:50 +0800 | [diff] [blame] | 1386 | { |
Liu Bo | b4ff5ad | 2017-11-30 17:26:39 -0700 | [diff] [blame] | 1387 | complete(bio->bi_private); |
Miao Xie | af8e2d1 | 2014-10-23 14:42:50 +0800 | [diff] [blame] | 1388 | } |
| 1389 | |
Miao Xie | af8e2d1 | 2014-10-23 14:42:50 +0800 | [diff] [blame] | 1390 | static int scrub_submit_raid56_bio_wait(struct btrfs_fs_info *fs_info, |
| 1391 | struct bio *bio, |
Qu Wenruo | 261d2dc | 2020-11-03 21:31:01 +0800 | [diff] [blame] | 1392 | struct scrub_page *spage) |
Miao Xie | af8e2d1 | 2014-10-23 14:42:50 +0800 | [diff] [blame] | 1393 | { |
Liu Bo | b4ff5ad | 2017-11-30 17:26:39 -0700 | [diff] [blame] | 1394 | DECLARE_COMPLETION_ONSTACK(done); |
Miao Xie | af8e2d1 | 2014-10-23 14:42:50 +0800 | [diff] [blame] | 1395 | int ret; |
Liu Bo | 762221f | 2018-01-02 13:36:42 -0700 | [diff] [blame] | 1396 | int mirror_num; |
Miao Xie | af8e2d1 | 2014-10-23 14:42:50 +0800 | [diff] [blame] | 1397 | |
Qu Wenruo | 261d2dc | 2020-11-03 21:31:01 +0800 | [diff] [blame] | 1398 | bio->bi_iter.bi_sector = spage->logical >> 9; |
Miao Xie | af8e2d1 | 2014-10-23 14:42:50 +0800 | [diff] [blame] | 1399 | bio->bi_private = &done; |
| 1400 | bio->bi_end_io = scrub_bio_wait_endio; |
| 1401 | |
Qu Wenruo | 261d2dc | 2020-11-03 21:31:01 +0800 | [diff] [blame] | 1402 | mirror_num = spage->sblock->pagev[0]->mirror_num; |
Qu Wenruo | 6a258d7 | 2021-09-23 14:00:09 +0800 | [diff] [blame] | 1403 | ret = raid56_parity_recover(bio, spage->recover->bioc, |
Qu Wenruo | 261d2dc | 2020-11-03 21:31:01 +0800 | [diff] [blame] | 1404 | spage->recover->map_length, |
Liu Bo | 762221f | 2018-01-02 13:36:42 -0700 | [diff] [blame] | 1405 | mirror_num, 0); |
Miao Xie | af8e2d1 | 2014-10-23 14:42:50 +0800 | [diff] [blame] | 1406 | if (ret) |
| 1407 | return ret; |
| 1408 | |
Liu Bo | b4ff5ad | 2017-11-30 17:26:39 -0700 | [diff] [blame] | 1409 | wait_for_completion_io(&done); |
| 1410 | return blk_status_to_errno(bio->bi_status); |
Miao Xie | af8e2d1 | 2014-10-23 14:42:50 +0800 | [diff] [blame] | 1411 | } |
| 1412 | |
Liu Bo | 6ca1765 | 2018-03-07 12:08:09 -0700 | [diff] [blame] | 1413 | static void scrub_recheck_block_on_raid56(struct btrfs_fs_info *fs_info, |
| 1414 | struct scrub_block *sblock) |
| 1415 | { |
| 1416 | struct scrub_page *first_page = sblock->pagev[0]; |
| 1417 | struct bio *bio; |
| 1418 | int page_num; |
| 1419 | |
| 1420 | /* All pages in sblock belong to the same stripe on the same device. */ |
| 1421 | ASSERT(first_page->dev); |
| 1422 | if (!first_page->dev->bdev) |
| 1423 | goto out; |
| 1424 | |
Qu Wenruo | c3a3b19 | 2021-09-15 15:17:18 +0800 | [diff] [blame] | 1425 | bio = btrfs_bio_alloc(BIO_MAX_VECS); |
Liu Bo | 6ca1765 | 2018-03-07 12:08:09 -0700 | [diff] [blame] | 1426 | bio_set_dev(bio, first_page->dev->bdev); |
| 1427 | |
| 1428 | for (page_num = 0; page_num < sblock->page_count; page_num++) { |
Qu Wenruo | 261d2dc | 2020-11-03 21:31:01 +0800 | [diff] [blame] | 1429 | struct scrub_page *spage = sblock->pagev[page_num]; |
Liu Bo | 6ca1765 | 2018-03-07 12:08:09 -0700 | [diff] [blame] | 1430 | |
Qu Wenruo | 261d2dc | 2020-11-03 21:31:01 +0800 | [diff] [blame] | 1431 | WARN_ON(!spage->page); |
| 1432 | bio_add_page(bio, spage->page, PAGE_SIZE, 0); |
Liu Bo | 6ca1765 | 2018-03-07 12:08:09 -0700 | [diff] [blame] | 1433 | } |
| 1434 | |
| 1435 | if (scrub_submit_raid56_bio_wait(fs_info, bio, first_page)) { |
| 1436 | bio_put(bio); |
| 1437 | goto out; |
| 1438 | } |
| 1439 | |
| 1440 | bio_put(bio); |
| 1441 | |
| 1442 | scrub_recheck_block_checksum(sblock); |
| 1443 | |
| 1444 | return; |
| 1445 | out: |
| 1446 | for (page_num = 0; page_num < sblock->page_count; page_num++) |
| 1447 | sblock->pagev[page_num]->io_error = 1; |
| 1448 | |
| 1449 | sblock->no_io_error_seen = 0; |
| 1450 | } |
| 1451 | |
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1452 | /* |
| 1453 | * This function checks the on-disk data for checksum errors, header
| 1454 | * errors and read I/O errors. If any I/O errors happen, the exact pages |
| 1455 | * which are errored are marked as being bad. The goal is to enable scrub |
| 1456 | * to take those pages that are not errored from all the mirrors so that |
| 1457 | * the pages that are errored in the just handled mirror can be repaired. |
| 1458 | */ |
Stefan Behrens | 34f5c8e | 2012-11-02 16:16:26 +0100 | [diff] [blame] | 1459 | static void scrub_recheck_block(struct btrfs_fs_info *fs_info, |
Zhao Lei | affe4a5 | 2015-08-24 21:32:06 +0800 | [diff] [blame] | 1460 | struct scrub_block *sblock, |
| 1461 | int retry_failed_mirror) |
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1462 | { |
| 1463 | int page_num; |
| 1464 | |
| 1465 | sblock->no_io_error_seen = 1; |
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1466 | |
Liu Bo | 6ca1765 | 2018-03-07 12:08:09 -0700 | [diff] [blame] | 1467 | 	/* Shortcut for raid56 */
| 1468 | if (!retry_failed_mirror && scrub_is_page_on_raid56(sblock->pagev[0])) |
| 1469 | return scrub_recheck_block_on_raid56(fs_info, sblock); |
| 1470 | |
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1471 | for (page_num = 0; page_num < sblock->page_count; page_num++) { |
| 1472 | struct bio *bio; |
Qu Wenruo | 261d2dc | 2020-11-03 21:31:01 +0800 | [diff] [blame] | 1473 | struct scrub_page *spage = sblock->pagev[page_num]; |
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1474 | |
Qu Wenruo | 261d2dc | 2020-11-03 21:31:01 +0800 | [diff] [blame] | 1475 | if (spage->dev->bdev == NULL) { |
| 1476 | spage->io_error = 1; |
Stefan Behrens | ea9947b | 2012-05-04 15:16:07 -0400 | [diff] [blame] | 1477 | sblock->no_io_error_seen = 0; |
| 1478 | continue; |
| 1479 | } |
| 1480 | |
Qu Wenruo | 261d2dc | 2020-11-03 21:31:01 +0800 | [diff] [blame] | 1481 | WARN_ON(!spage->page); |
Qu Wenruo | c3a3b19 | 2021-09-15 15:17:18 +0800 | [diff] [blame] | 1482 | bio = btrfs_bio_alloc(1); |
Qu Wenruo | 261d2dc | 2020-11-03 21:31:01 +0800 | [diff] [blame] | 1483 | bio_set_dev(bio, spage->dev->bdev); |
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1484 | |
Qu Wenruo | 8df507c | 2021-04-22 19:02:46 +0800 | [diff] [blame] | 1485 | bio_add_page(bio, spage->page, fs_info->sectorsize, 0); |
Qu Wenruo | 261d2dc | 2020-11-03 21:31:01 +0800 | [diff] [blame] | 1486 | bio->bi_iter.bi_sector = spage->physical >> 9; |
Liu Bo | 6ca1765 | 2018-03-07 12:08:09 -0700 | [diff] [blame] | 1487 | bio->bi_opf = REQ_OP_READ; |
Miao Xie | af8e2d1 | 2014-10-23 14:42:50 +0800 | [diff] [blame] | 1488 | |
Liu Bo | 6ca1765 | 2018-03-07 12:08:09 -0700 | [diff] [blame] | 1489 | if (btrfsic_submit_bio_wait(bio)) { |
Qu Wenruo | 261d2dc | 2020-11-03 21:31:01 +0800 | [diff] [blame] | 1490 | spage->io_error = 1; |
Liu Bo | 6ca1765 | 2018-03-07 12:08:09 -0700 | [diff] [blame] | 1491 | sblock->no_io_error_seen = 0; |
Miao Xie | af8e2d1 | 2014-10-23 14:42:50 +0800 | [diff] [blame] | 1492 | } |
Kent Overstreet | 33879d4 | 2013-11-23 22:33:32 -0800 | [diff] [blame] | 1493 | |
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1494 | bio_put(bio); |
| 1495 | } |
| 1496 | |
| 1497 | if (sblock->no_io_error_seen) |
Zhao Lei | ba7cf98 | 2015-08-24 21:18:02 +0800 | [diff] [blame] | 1498 | scrub_recheck_block_checksum(sblock); |
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1499 | } |
| 1500 | |
Miao Xie | 17a9be2 | 2014-07-24 11:37:08 +0800 | [diff] [blame] | 1501 | static inline int scrub_check_fsid(u8 fsid[], |
| 1502 | struct scrub_page *spage) |
| 1503 | { |
| 1504 | struct btrfs_fs_devices *fs_devices = spage->dev->fs_devices; |
| 1505 | int ret; |
| 1506 | |
Anand Jain | 44880fd | 2017-07-29 17:50:09 +0800 | [diff] [blame] | 1507 | ret = memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE); |
Miao Xie | 17a9be2 | 2014-07-24 11:37:08 +0800 | [diff] [blame] | 1508 | return !ret; |
| 1509 | } |
| 1510 | |
Zhao Lei | ba7cf98 | 2015-08-24 21:18:02 +0800 | [diff] [blame] | 1511 | static void scrub_recheck_block_checksum(struct scrub_block *sblock) |
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1512 | { |
Zhao Lei | ba7cf98 | 2015-08-24 21:18:02 +0800 | [diff] [blame] | 1513 | sblock->header_error = 0; |
| 1514 | sblock->checksum_error = 0; |
| 1515 | sblock->generation_error = 0; |
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1516 | |
Zhao Lei | ba7cf98 | 2015-08-24 21:18:02 +0800 | [diff] [blame] | 1517 | if (sblock->pagev[0]->flags & BTRFS_EXTENT_FLAG_DATA) |
| 1518 | scrub_checksum_data(sblock); |
| 1519 | else |
| 1520 | scrub_checksum_tree_block(sblock); |
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1521 | } |
| 1522 | |
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1523 | static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad, |
Zhao Lei | 114ab50 | 2015-01-20 15:11:36 +0800 | [diff] [blame] | 1524 | struct scrub_block *sblock_good) |
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1525 | { |
| 1526 | int page_num; |
| 1527 | int ret = 0; |
| 1528 | |
| 1529 | for (page_num = 0; page_num < sblock_bad->page_count; page_num++) { |
| 1530 | int ret_sub; |
| 1531 | |
| 1532 | ret_sub = scrub_repair_page_from_good_copy(sblock_bad, |
| 1533 | sblock_good, |
Zhao Lei | 114ab50 | 2015-01-20 15:11:36 +0800 | [diff] [blame] | 1534 | page_num, 1); |
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1535 | if (ret_sub) |
| 1536 | ret = ret_sub; |
| 1537 | } |
| 1538 | |
| 1539 | return ret; |
| 1540 | } |
| 1541 | |
| 1542 | static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad, |
| 1543 | struct scrub_block *sblock_good, |
| 1544 | int page_num, int force_write) |
| 1545 | { |
Qu Wenruo | 261d2dc | 2020-11-03 21:31:01 +0800 | [diff] [blame] | 1546 | struct scrub_page *spage_bad = sblock_bad->pagev[page_num]; |
| 1547 | struct scrub_page *spage_good = sblock_good->pagev[page_num]; |
Jeff Mahoney | 0b246af | 2016-06-22 18:54:23 -0400 | [diff] [blame] | 1548 | struct btrfs_fs_info *fs_info = sblock_bad->sctx->fs_info; |
Qu Wenruo | 8df507c | 2021-04-22 19:02:46 +0800 | [diff] [blame] | 1549 | const u32 sectorsize = fs_info->sectorsize; |
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1550 | |
Qu Wenruo | 261d2dc | 2020-11-03 21:31:01 +0800 | [diff] [blame] | 1551 | BUG_ON(spage_bad->page == NULL); |
| 1552 | BUG_ON(spage_good->page == NULL); |
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1553 | if (force_write || sblock_bad->header_error || |
Qu Wenruo | 261d2dc | 2020-11-03 21:31:01 +0800 | [diff] [blame] | 1554 | sblock_bad->checksum_error || spage_bad->io_error) { |
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1555 | struct bio *bio; |
| 1556 | int ret; |
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1557 | |
Qu Wenruo | 261d2dc | 2020-11-03 21:31:01 +0800 | [diff] [blame] | 1558 | if (!spage_bad->dev->bdev) { |
Jeff Mahoney | 0b246af | 2016-06-22 18:54:23 -0400 | [diff] [blame] | 1559 | btrfs_warn_rl(fs_info, |
Jeff Mahoney | 5d163e0 | 2016-09-20 10:05:00 -0400 | [diff] [blame] | 1560 | "scrub_repair_page_from_good_copy(bdev == NULL) is unexpected"); |
Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 1561 | return -EIO; |
| 1562 | } |
| 1563 | |
Qu Wenruo | c3a3b19 | 2021-09-15 15:17:18 +0800 | [diff] [blame] | 1564 | bio = btrfs_bio_alloc(1); |
Qu Wenruo | 261d2dc | 2020-11-03 21:31:01 +0800 | [diff] [blame] | 1565 | bio_set_dev(bio, spage_bad->dev->bdev); |
| 1566 | bio->bi_iter.bi_sector = spage_bad->physical >> 9; |
David Sterba | ebcc326 | 2018-06-29 10:56:53 +0200 | [diff] [blame] | 1567 | bio->bi_opf = REQ_OP_WRITE; |
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1568 | |
Qu Wenruo | 8df507c | 2021-04-22 19:02:46 +0800 | [diff] [blame] | 1569 | ret = bio_add_page(bio, spage_good->page, sectorsize, 0); |
| 1570 | if (ret != sectorsize) { |
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1571 | bio_put(bio); |
| 1572 | return -EIO; |
| 1573 | } |
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1574 | |
Mike Christie | 4e49ea4 | 2016-06-05 14:31:41 -0500 | [diff] [blame] | 1575 | if (btrfsic_submit_bio_wait(bio)) { |
Qu Wenruo | 261d2dc | 2020-11-03 21:31:01 +0800 | [diff] [blame] | 1576 | btrfs_dev_stat_inc_and_print(spage_bad->dev, |
Stefan Behrens | 442a4f6 | 2012-05-25 16:06:08 +0200 | [diff] [blame] | 1577 | BTRFS_DEV_STAT_WRITE_ERRS); |
David Sterba | e37abe9 | 2018-04-04 17:20:52 +0200 | [diff] [blame] | 1578 | atomic64_inc(&fs_info->dev_replace.num_write_errors); |
Stefan Behrens | 442a4f6 | 2012-05-25 16:06:08 +0200 | [diff] [blame] | 1579 | bio_put(bio); |
| 1580 | return -EIO; |
| 1581 | } |
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1582 | bio_put(bio); |
| 1583 | } |
| 1584 | |
| 1585 | return 0; |
| 1586 | } |
| 1587 | |
Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 1588 | static void scrub_write_block_to_dev_replace(struct scrub_block *sblock) |
| 1589 | { |
Jeff Mahoney | 0b246af | 2016-06-22 18:54:23 -0400 | [diff] [blame] | 1590 | struct btrfs_fs_info *fs_info = sblock->sctx->fs_info; |
Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 1591 | int page_num; |
| 1592 | |
Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 1593 | /* |
| 1594 | 	 * This block is used for checking the parity on the source device,
| 1595 | 	 * so the data need not be written to the destination device.
| 1596 | */ |
| 1597 | if (sblock->sparity) |
| 1598 | return; |
| 1599 | |
Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 1600 | for (page_num = 0; page_num < sblock->page_count; page_num++) { |
| 1601 | int ret; |
| 1602 | |
| 1603 | ret = scrub_write_page_to_dev_replace(sblock, page_num); |
| 1604 | if (ret) |
David Sterba | e37abe9 | 2018-04-04 17:20:52 +0200 | [diff] [blame] | 1605 | atomic64_inc(&fs_info->dev_replace.num_write_errors); |
Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 1606 | } |
| 1607 | } |
| 1608 | |
| 1609 | static int scrub_write_page_to_dev_replace(struct scrub_block *sblock, |
| 1610 | int page_num) |
| 1611 | { |
| 1612 | struct scrub_page *spage = sblock->pagev[page_num]; |
| 1613 | |
| 1614 | BUG_ON(spage->page == NULL); |
David Sterba | a8b3a89 | 2020-05-29 15:26:07 +0200 | [diff] [blame] | 1615 | if (spage->io_error) |
| 1616 | clear_page(page_address(spage->page)); |
Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 1617 | |
Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 1618 | return scrub_add_page_to_wr_bio(sblock->sctx, spage); |
| 1619 | } |
| 1620 | |
Naohiro Aota | de17add | 2021-02-04 19:22:13 +0900 | [diff] [blame] | 1621 | static int fill_writer_pointer_gap(struct scrub_ctx *sctx, u64 physical) |
| 1622 | { |
| 1623 | int ret = 0; |
| 1624 | u64 length; |
| 1625 | |
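| | 	/*
| | 	 * Writes to a sequential zone must be contiguous. If the next write
| | 	 * would land past the current write pointer, zero-fill the gap so
| | 	 * the zone's write pointer advances to @physical first.
| | 	 */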
| 1626 | if (!btrfs_is_zoned(sctx->fs_info)) |
| 1627 | return 0; |
| 1628 | |
Naohiro Aota | 7db1c5d | 2021-02-04 19:22:14 +0900 | [diff] [blame] | 1629 | if (!btrfs_dev_is_sequential(sctx->wr_tgtdev, physical)) |
| 1630 | return 0; |
| 1631 | |
Naohiro Aota | de17add | 2021-02-04 19:22:13 +0900 | [diff] [blame] | 1632 | if (sctx->write_pointer < physical) { |
| 1633 | length = physical - sctx->write_pointer; |
| 1634 | |
| 1635 | ret = btrfs_zoned_issue_zeroout(sctx->wr_tgtdev, |
| 1636 | sctx->write_pointer, length); |
| 1637 | if (!ret) |
| 1638 | sctx->write_pointer = physical; |
| 1639 | } |
| 1640 | return ret; |
| 1641 | } |
| 1642 | |
Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 1643 | static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx, |
| 1644 | struct scrub_page *spage) |
| 1645 | { |
Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 1646 | struct scrub_bio *sbio; |
| 1647 | int ret; |
Qu Wenruo | 8df507c | 2021-04-22 19:02:46 +0800 | [diff] [blame] | 1648 | const u32 sectorsize = sctx->fs_info->sectorsize; |
Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 1649 | |
David Sterba | 3fb9930 | 2017-05-16 19:10:32 +0200 | [diff] [blame] | 1650 | mutex_lock(&sctx->wr_lock); |
Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 1651 | again: |
David Sterba | 3fb9930 | 2017-05-16 19:10:32 +0200 | [diff] [blame] | 1652 | if (!sctx->wr_curr_bio) { |
| 1653 | sctx->wr_curr_bio = kzalloc(sizeof(*sctx->wr_curr_bio), |
David Sterba | 58c4e17 | 2016-02-11 10:49:42 +0100 | [diff] [blame] | 1654 | GFP_KERNEL); |
David Sterba | 3fb9930 | 2017-05-16 19:10:32 +0200 | [diff] [blame] | 1655 | if (!sctx->wr_curr_bio) { |
| 1656 | mutex_unlock(&sctx->wr_lock); |
Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 1657 | return -ENOMEM; |
| 1658 | } |
David Sterba | 3fb9930 | 2017-05-16 19:10:32 +0200 | [diff] [blame] | 1659 | sctx->wr_curr_bio->sctx = sctx; |
| 1660 | sctx->wr_curr_bio->page_count = 0; |
Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 1661 | } |
David Sterba | 3fb9930 | 2017-05-16 19:10:32 +0200 | [diff] [blame] | 1662 | sbio = sctx->wr_curr_bio; |
Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 1663 | if (sbio->page_count == 0) { |
| 1664 | struct bio *bio; |
| 1665 | |
Naohiro Aota | de17add | 2021-02-04 19:22:13 +0900 | [diff] [blame] | 1666 | ret = fill_writer_pointer_gap(sctx, |
| 1667 | spage->physical_for_dev_replace); |
| 1668 | if (ret) { |
| 1669 | mutex_unlock(&sctx->wr_lock); |
| 1670 | return ret; |
| 1671 | } |
| 1672 | |
Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 1673 | sbio->physical = spage->physical_for_dev_replace; |
| 1674 | sbio->logical = spage->logical; |
David Sterba | 3fb9930 | 2017-05-16 19:10:32 +0200 | [diff] [blame] | 1675 | sbio->dev = sctx->wr_tgtdev; |
Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 1676 | bio = sbio->bio; |
| 1677 | if (!bio) { |
Qu Wenruo | c3a3b19 | 2021-09-15 15:17:18 +0800 | [diff] [blame] | 1678 | bio = btrfs_bio_alloc(sctx->pages_per_wr_bio); |
Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 1679 | sbio->bio = bio; |
| 1680 | } |
| 1681 | |
| 1682 | bio->bi_private = sbio; |
| 1683 | bio->bi_end_io = scrub_wr_bio_end_io; |
Christoph Hellwig | 74d4699 | 2017-08-23 19:10:32 +0200 | [diff] [blame] | 1684 | bio_set_dev(bio, sbio->dev->bdev); |
Kent Overstreet | 4f024f3 | 2013-10-11 15:44:27 -0700 | [diff] [blame] | 1685 | bio->bi_iter.bi_sector = sbio->physical >> 9; |
David Sterba | ebcc326 | 2018-06-29 10:56:53 +0200 | [diff] [blame] | 1686 | bio->bi_opf = REQ_OP_WRITE; |
Christoph Hellwig | 4e4cbee | 2017-06-03 09:38:06 +0200 | [diff] [blame] | 1687 | sbio->status = 0; |
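| | 		/*
| | 		 * Pages are batched into one write bio only while both the
| | 		 * physical and the logical ranges stay contiguous; otherwise
| | 		 * submit the current bio and retry with a fresh one.
| | 		 */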
Qu Wenruo | 8df507c | 2021-04-22 19:02:46 +0800 | [diff] [blame] | 1688 | } else if (sbio->physical + sbio->page_count * sectorsize != |
Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 1689 | spage->physical_for_dev_replace || |
Qu Wenruo | 8df507c | 2021-04-22 19:02:46 +0800 | [diff] [blame] | 1690 | sbio->logical + sbio->page_count * sectorsize != |
Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 1691 | spage->logical) { |
| 1692 | scrub_wr_submit(sctx); |
| 1693 | goto again; |
| 1694 | } |
| 1695 | |
Qu Wenruo | 8df507c | 2021-04-22 19:02:46 +0800 | [diff] [blame] | 1696 | ret = bio_add_page(sbio->bio, spage->page, sectorsize, 0); |
| 1697 | if (ret != sectorsize) { |
Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 1698 | if (sbio->page_count < 1) { |
| 1699 | bio_put(sbio->bio); |
| 1700 | sbio->bio = NULL; |
David Sterba | 3fb9930 | 2017-05-16 19:10:32 +0200 | [diff] [blame] | 1701 | mutex_unlock(&sctx->wr_lock); |
Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 1702 | return -EIO; |
| 1703 | } |
| 1704 | scrub_wr_submit(sctx); |
| 1705 | goto again; |
| 1706 | } |
| 1707 | |
| 1708 | sbio->pagev[sbio->page_count] = spage; |
| 1709 | scrub_page_get(spage); |
| 1710 | sbio->page_count++; |
David Sterba | 3fb9930 | 2017-05-16 19:10:32 +0200 | [diff] [blame] | 1711 | if (sbio->page_count == sctx->pages_per_wr_bio) |
Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 1712 | scrub_wr_submit(sctx); |
David Sterba | 3fb9930 | 2017-05-16 19:10:32 +0200 | [diff] [blame] | 1713 | mutex_unlock(&sctx->wr_lock); |
Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 1714 | |
| 1715 | return 0; |
| 1716 | } |
| 1717 | |
| 1718 | static void scrub_wr_submit(struct scrub_ctx *sctx) |
| 1719 | { |
Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 1720 | struct scrub_bio *sbio; |
| 1721 | |
David Sterba | 3fb9930 | 2017-05-16 19:10:32 +0200 | [diff] [blame] | 1722 | if (!sctx->wr_curr_bio) |
Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 1723 | return; |
| 1724 | |
David Sterba | 3fb9930 | 2017-05-16 19:10:32 +0200 | [diff] [blame] | 1725 | sbio = sctx->wr_curr_bio; |
| 1726 | sctx->wr_curr_bio = NULL; |
Christoph Hellwig | 309dca30 | 2021-01-24 11:02:34 +0100 | [diff] [blame] | 1727 | WARN_ON(!sbio->bio->bi_bdev); |
Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 1728 | scrub_pending_bio_inc(sctx); |
| 1729 | 	/* Process all writes in a single worker thread, so that the block layer
| 1730 | 	 * can order the requests before sending them to the driver; this
| 1731 | 	 * doubled the write performance on spinning disks when measured
| 1732 | 	 * with Linux 3.5. */
Mike Christie | 4e49ea4 | 2016-06-05 14:31:41 -0500 | [diff] [blame] | 1733 | btrfsic_submit_bio(sbio->bio); |
Naohiro Aota | de17add | 2021-02-04 19:22:13 +0900 | [diff] [blame] | 1734 | |
| 1735 | if (btrfs_is_zoned(sctx->fs_info)) |
Qu Wenruo | 8df507c | 2021-04-22 19:02:46 +0800 | [diff] [blame] | 1736 | sctx->write_pointer = sbio->physical + sbio->page_count * |
| 1737 | sctx->fs_info->sectorsize; |
Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 1738 | } |
| 1739 | |
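| | /* |
| | * Completion callback for dev-replace write bios; defer the real work |
| | * to a worker thread, as end_io callbacks may run in atomic context. |
| | */ |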
Christoph Hellwig | 4246a0b | 2015-07-20 15:29:37 +0200 | [diff] [blame] | 1740 | static void scrub_wr_bio_end_io(struct bio *bio) |
Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 1741 | { |
| 1742 | struct scrub_bio *sbio = bio->bi_private; |
Jeff Mahoney | fb45625 | 2016-06-22 18:54:56 -0400 | [diff] [blame] | 1743 | struct btrfs_fs_info *fs_info = sbio->dev->fs_info; |
Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 1744 | |
Christoph Hellwig | 4e4cbee | 2017-06-03 09:38:06 +0200 | [diff] [blame] | 1745 | sbio->status = bio->bi_status; |
Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 1746 | sbio->bio = bio; |
| 1747 | |
Omar Sandoval | a0cac0e | 2019-09-16 11:30:57 -0700 | [diff] [blame] | 1748 | btrfs_init_work(&sbio->work, scrub_wr_bio_end_io_worker, NULL, NULL); |
Qu Wenruo | 0339ef2 | 2014-02-28 10:46:17 +0800 | [diff] [blame] | 1749 | btrfs_queue_work(fs_info->scrub_wr_completion_workers, &sbio->work); |
Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 1750 | } |
| 1751 | |
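| | /* |
| | * Process-context part of write completion: on error, count a write |
| | * error against dev-replace for every page in the bio, then drop the |
| | * page references and free the bio. |
| | */ |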
| 1752 | static void scrub_wr_bio_end_io_worker(struct btrfs_work *work) |
| 1753 | { |
| 1754 | struct scrub_bio *sbio = container_of(work, struct scrub_bio, work); |
| 1755 | struct scrub_ctx *sctx = sbio->sctx; |
| 1756 | int i; |
| 1757 | |
| 1758 | WARN_ON(sbio->page_count > SCRUB_PAGES_PER_WR_BIO); |
Christoph Hellwig | 4e4cbee | 2017-06-03 09:38:06 +0200 | [diff] [blame] | 1759 | if (sbio->status) { |
Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 1760 | struct btrfs_dev_replace *dev_replace = |
Jeff Mahoney | fb45625 | 2016-06-22 18:54:56 -0400 | [diff] [blame] | 1761 | &sbio->sctx->fs_info->dev_replace; |
Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 1762 | |
| 1763 | for (i = 0; i < sbio->page_count; i++) { |
| 1764 | struct scrub_page *spage = sbio->pagev[i]; |
| 1765 | |
| 1766 | spage->io_error = 1; |
David Sterba | e37abe9 | 2018-04-04 17:20:52 +0200 | [diff] [blame] | 1767 | atomic64_inc(&dev_replace->num_write_errors); |
Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 1768 | } |
| 1769 | } |
| 1770 | |
| 1771 | for (i = 0; i < sbio->page_count; i++) |
| 1772 | scrub_page_put(sbio->pagev[i]); |
| 1773 | |
| 1774 | bio_put(sbio->bio); |
| 1775 | kfree(sbio); |
| 1776 | scrub_pending_bio_dec(sctx); |
| 1777 | } |
| 1778 | |
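| | /* |
| | * Verify the checksum of one scrub_block, dispatching on the extent |
| | * flags (data, tree block or super block). A non-zero return sends the |
| | * block to scrub_handle_errored_block() for repair. |
| | */ |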
| 1779 | static int scrub_checksum(struct scrub_block *sblock) |
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1780 | { |
| 1781 | u64 flags; |
| 1782 | int ret; |
| 1783 | |
Zhao Lei | ba7cf98 | 2015-08-24 21:18:02 +0800 | [diff] [blame] | 1784 | /* |
| 1785 | * No need to initialize these stats currently, because this |
| 1786 | * function only uses the return value instead of the stats. |
| 1787 | * |
| 1788 | * TODO: always use the stats. |
| 1791 | */ |
| 1792 | sblock->header_error = 0; |
| 1793 | sblock->generation_error = 0; |
| 1794 | sblock->checksum_error = 0; |
| 1795 | |
Stefan Behrens | 7a9e998 | 2012-11-02 14:58:04 +0100 | [diff] [blame] | 1796 | WARN_ON(sblock->page_count < 1); |
| 1797 | flags = sblock->pagev[0]->flags; |
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1798 | ret = 0; |
| 1799 | if (flags & BTRFS_EXTENT_FLAG_DATA) |
| 1800 | ret = scrub_checksum_data(sblock); |
| 1801 | else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) |
| 1802 | ret = scrub_checksum_tree_block(sblock); |
| 1803 | else if (flags & BTRFS_EXTENT_FLAG_SUPER) |
| 1804 | (void)scrub_checksum_super(sblock); |
| 1805 | else |
| 1806 | WARN_ON(1); |
| 1807 | if (ret) |
| 1808 | scrub_handle_errored_block(sblock); |
Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 1809 | |
| 1810 | return ret; |
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1811 | } |
| 1812 | |
| 1813 | static int scrub_checksum_data(struct scrub_block *sblock) |
| 1814 | { |
Stefan Behrens | d9d181c | 2012-11-02 09:58:09 +0100 | [diff] [blame] | 1815 | struct scrub_ctx *sctx = sblock->sctx; |
Johannes Thumshirn | d517857 | 2019-06-03 16:58:57 +0200 | [diff] [blame] | 1816 | struct btrfs_fs_info *fs_info = sctx->fs_info; |
| 1817 | SHASH_DESC_ON_STACK(shash, fs_info->csum_shash); |
Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 1818 | u8 csum[BTRFS_CSUM_SIZE]; |
David Sterba | d41ebef | 2020-05-29 16:20:35 +0200 | [diff] [blame] | 1819 | struct scrub_page *spage; |
David Sterba | b048525 | 2020-05-29 15:32:51 +0200 | [diff] [blame] | 1820 | char *kaddr; |
Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 1821 | |
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1822 | BUG_ON(sblock->page_count < 1); |
David Sterba | d41ebef | 2020-05-29 16:20:35 +0200 | [diff] [blame] | 1823 | spage = sblock->pagev[0]; |
| 1824 | if (!spage->have_csum) |
Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 1825 | return 0; |
| 1826 | |
David Sterba | d41ebef | 2020-05-29 16:20:35 +0200 | [diff] [blame] | 1827 | kaddr = page_address(spage->page); |
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1828 | |
David Sterba | 771aba0 | 2020-05-29 15:54:41 +0200 | [diff] [blame] | 1829 | shash->tfm = fs_info->csum_shash; |
| 1830 | crypto_shash_init(shash); |
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1831 | |
Qu Wenruo | b29dca4 | 2020-12-02 14:48:10 +0800 | [diff] [blame] | 1832 | /* |
| 1833 | * In scrub_pages() and scrub_pages_for_parity() we ensure each spage |
| 1834 | * only contains one sector of data. |
| 1835 | */ |
| 1836 | crypto_shash_digest(shash, kaddr, fs_info->sectorsize, csum); |
| 1837 | |
| 1838 | if (memcmp(csum, spage->csum, fs_info->csum_size)) |
Zhao Lei | ba7cf98 | 2015-08-24 21:18:02 +0800 | [diff] [blame] | 1839 | sblock->checksum_error = 1; |
Zhao Lei | ba7cf98 | 2015-08-24 21:18:02 +0800 | [diff] [blame] | 1840 | return sblock->checksum_error; |
Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 1841 | } |
| 1842 | |
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1843 | static int scrub_checksum_tree_block(struct scrub_block *sblock) |
Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 1844 | { |
Stefan Behrens | d9d181c | 2012-11-02 09:58:09 +0100 | [diff] [blame] | 1845 | struct scrub_ctx *sctx = sblock->sctx; |
Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 1846 | struct btrfs_header *h; |
Jeff Mahoney | 0b246af | 2016-06-22 18:54:23 -0400 | [diff] [blame] | 1847 | struct btrfs_fs_info *fs_info = sctx->fs_info; |
Johannes Thumshirn | d517857 | 2019-06-03 16:58:57 +0200 | [diff] [blame] | 1848 | SHASH_DESC_ON_STACK(shash, fs_info->csum_shash); |
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1849 | u8 calculated_csum[BTRFS_CSUM_SIZE]; |
| 1850 | u8 on_disk_csum[BTRFS_CSUM_SIZE]; |
Qu Wenruo | 53f3251 | 2020-12-02 14:48:09 +0800 | [diff] [blame] | 1851 | /* |
| 1852 | * This is done in sectorsize steps even for metadata as there's a |
| 1853 | * constraint for nodesize to be aligned to sectorsize. This will need |
| 1854 | * to change so we don't misuse data and metadata units like that. |
| 1855 | */ |
| 1856 | const u32 sectorsize = sctx->fs_info->sectorsize; |
| 1857 | const int num_sectors = fs_info->nodesize >> fs_info->sectorsize_bits; |
David Sterba | 521e102 | 2020-05-29 15:54:41 +0200 | [diff] [blame] | 1858 | int i; |
David Sterba | 100aa5d | 2020-05-29 16:20:35 +0200 | [diff] [blame] | 1859 | struct scrub_page *spage; |
David Sterba | b048525 | 2020-05-29 15:32:51 +0200 | [diff] [blame] | 1860 | char *kaddr; |
Johannes Thumshirn | d517857 | 2019-06-03 16:58:57 +0200 | [diff] [blame] | 1861 | |
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1862 | BUG_ON(sblock->page_count < 1); |
Qu Wenruo | 53f3251 | 2020-12-02 14:48:09 +0800 | [diff] [blame] | 1863 | |
| 1864 | /* Each member in pagev is just one sector, not a full page */ |
| 1865 | ASSERT(sblock->page_count == num_sectors); |
| 1866 | |
David Sterba | 100aa5d | 2020-05-29 16:20:35 +0200 | [diff] [blame] | 1867 | spage = sblock->pagev[0]; |
| 1868 | kaddr = page_address(spage->page); |
David Sterba | b048525 | 2020-05-29 15:32:51 +0200 | [diff] [blame] | 1869 | h = (struct btrfs_header *)kaddr; |
David Sterba | 2ae0c2d | 2020-06-30 17:44:49 +0200 | [diff] [blame] | 1870 | memcpy(on_disk_csum, h->csum, sctx->fs_info->csum_size); |
Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 1871 | |
| 1872 | /* |
| 1873 | * we don't use the getter functions here, as we |
| 1874 | * a) don't have an extent buffer and |
| 1875 | * b) the page is already kmapped |
| 1876 | */ |
David Sterba | 100aa5d | 2020-05-29 16:20:35 +0200 | [diff] [blame] | 1877 | if (spage->logical != btrfs_stack_header_bytenr(h)) |
Zhao Lei | ba7cf98 | 2015-08-24 21:18:02 +0800 | [diff] [blame] | 1878 | sblock->header_error = 1; |
Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 1879 | |
David Sterba | 100aa5d | 2020-05-29 16:20:35 +0200 | [diff] [blame] | 1880 | if (spage->generation != btrfs_stack_header_generation(h)) { |
Zhao Lei | ba7cf98 | 2015-08-24 21:18:02 +0800 | [diff] [blame] | 1881 | sblock->header_error = 1; |
| 1882 | sblock->generation_error = 1; |
| 1883 | } |
Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 1884 | |
David Sterba | 100aa5d | 2020-05-29 16:20:35 +0200 | [diff] [blame] | 1885 | if (!scrub_check_fsid(h->fsid, spage)) |
Zhao Lei | ba7cf98 | 2015-08-24 21:18:02 +0800 | [diff] [blame] | 1886 | sblock->header_error = 1; |
Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 1887 | |
| 1888 | if (memcmp(h->chunk_tree_uuid, fs_info->chunk_tree_uuid, |
| 1889 | BTRFS_UUID_SIZE)) |
Zhao Lei | ba7cf98 | 2015-08-24 21:18:02 +0800 | [diff] [blame] | 1890 | sblock->header_error = 1; |
Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 1891 | |
David Sterba | 521e102 | 2020-05-29 15:54:41 +0200 | [diff] [blame] | 1892 | shash->tfm = fs_info->csum_shash; |
| 1893 | crypto_shash_init(shash); |
| 1894 | crypto_shash_update(shash, kaddr + BTRFS_CSUM_SIZE, |
Qu Wenruo | 53f3251 | 2020-12-02 14:48:09 +0800 | [diff] [blame] | 1895 | sectorsize - BTRFS_CSUM_SIZE); |
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1896 | |
Qu Wenruo | 53f3251 | 2020-12-02 14:48:09 +0800 | [diff] [blame] | 1897 | for (i = 1; i < num_sectors; i++) { |
David Sterba | 521e102 | 2020-05-29 15:54:41 +0200 | [diff] [blame] | 1898 | kaddr = page_address(sblock->pagev[i]->page); |
Qu Wenruo | 53f3251 | 2020-12-02 14:48:09 +0800 | [diff] [blame] | 1899 | crypto_shash_update(shash, kaddr, sectorsize); |
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1900 | } |
| 1901 | |
Johannes Thumshirn | d517857 | 2019-06-03 16:58:57 +0200 | [diff] [blame] | 1902 | crypto_shash_final(shash, calculated_csum); |
David Sterba | 2ae0c2d | 2020-06-30 17:44:49 +0200 | [diff] [blame] | 1903 | if (memcmp(calculated_csum, on_disk_csum, sctx->fs_info->csum_size)) |
Zhao Lei | ba7cf98 | 2015-08-24 21:18:02 +0800 | [diff] [blame] | 1904 | sblock->checksum_error = 1; |
Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 1905 | |
Zhao Lei | ba7cf98 | 2015-08-24 21:18:02 +0800 | [diff] [blame] | 1906 | return sblock->header_error || sblock->checksum_error; |
Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 1907 | } |
| 1908 | |
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1909 | static int scrub_checksum_super(struct scrub_block *sblock) |
Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 1910 | { |
| 1911 | struct btrfs_super_block *s; |
Stefan Behrens | d9d181c | 2012-11-02 09:58:09 +0100 | [diff] [blame] | 1912 | struct scrub_ctx *sctx = sblock->sctx; |
Johannes Thumshirn | d517857 | 2019-06-03 16:58:57 +0200 | [diff] [blame] | 1913 | struct btrfs_fs_info *fs_info = sctx->fs_info; |
| 1914 | SHASH_DESC_ON_STACK(shash, fs_info->csum_shash); |
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1915 | u8 calculated_csum[BTRFS_CSUM_SIZE]; |
David Sterba | c746054 | 2020-05-29 15:47:05 +0200 | [diff] [blame] | 1916 | struct scrub_page *spage; |
David Sterba | b048525 | 2020-05-29 15:32:51 +0200 | [diff] [blame] | 1917 | char *kaddr; |
Stefan Behrens | 442a4f6 | 2012-05-25 16:06:08 +0200 | [diff] [blame] | 1918 | int fail_gen = 0; |
| 1919 | int fail_cor = 0; |
Johannes Thumshirn | d517857 | 2019-06-03 16:58:57 +0200 | [diff] [blame] | 1920 | |
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1921 | BUG_ON(sblock->page_count < 1); |
David Sterba | c746054 | 2020-05-29 15:47:05 +0200 | [diff] [blame] | 1922 | spage = sblock->pagev[0]; |
| 1923 | kaddr = page_address(spage->page); |
David Sterba | b048525 | 2020-05-29 15:32:51 +0200 | [diff] [blame] | 1924 | s = (struct btrfs_super_block *)kaddr; |
Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 1925 | |
David Sterba | c746054 | 2020-05-29 15:47:05 +0200 | [diff] [blame] | 1926 | if (spage->logical != btrfs_super_bytenr(s)) |
Stefan Behrens | 442a4f6 | 2012-05-25 16:06:08 +0200 | [diff] [blame] | 1927 | ++fail_cor; |
Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 1928 | |
David Sterba | c746054 | 2020-05-29 15:47:05 +0200 | [diff] [blame] | 1929 | if (spage->generation != btrfs_super_generation(s)) |
Stefan Behrens | 442a4f6 | 2012-05-25 16:06:08 +0200 | [diff] [blame] | 1930 | ++fail_gen; |
Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 1931 | |
David Sterba | c746054 | 2020-05-29 15:47:05 +0200 | [diff] [blame] | 1932 | if (!scrub_check_fsid(s->fsid, spage)) |
Stefan Behrens | 442a4f6 | 2012-05-25 16:06:08 +0200 | [diff] [blame] | 1933 | ++fail_cor; |
Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 1934 | |
David Sterba | 83cf6d5 | 2020-05-29 15:40:36 +0200 | [diff] [blame] | 1935 | shash->tfm = fs_info->csum_shash; |
| 1936 | crypto_shash_init(shash); |
| 1937 | crypto_shash_digest(shash, kaddr + BTRFS_CSUM_SIZE, |
| 1938 | BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE, calculated_csum); |
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1939 | |
David Sterba | 2ae0c2d | 2020-06-30 17:44:49 +0200 | [diff] [blame] | 1940 | if (memcmp(calculated_csum, s->csum, sctx->fs_info->csum_size)) |
Stefan Behrens | 442a4f6 | 2012-05-25 16:06:08 +0200 | [diff] [blame] | 1941 | ++fail_cor; |
Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 1942 | |
Stefan Behrens | 442a4f6 | 2012-05-25 16:06:08 +0200 | [diff] [blame] | 1943 | if (fail_cor + fail_gen) { |
Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 1944 | /* |
| 1945 | * if we find an error in a super block, we just report it. |
| 1946 | * They will get written with the next transaction commit |
| 1947 | * anyway |
| 1948 | */ |
Stefan Behrens | d9d181c | 2012-11-02 09:58:09 +0100 | [diff] [blame] | 1949 | spin_lock(&sctx->stat_lock); |
| 1950 | ++sctx->stat.super_errors; |
| 1951 | spin_unlock(&sctx->stat_lock); |
Stefan Behrens | 442a4f6 | 2012-05-25 16:06:08 +0200 | [diff] [blame] | 1952 | if (fail_cor) |
David Sterba | c746054 | 2020-05-29 15:47:05 +0200 | [diff] [blame] | 1953 | btrfs_dev_stat_inc_and_print(spage->dev, |
Stefan Behrens | 442a4f6 | 2012-05-25 16:06:08 +0200 | [diff] [blame] | 1954 | BTRFS_DEV_STAT_CORRUPTION_ERRS); |
| 1955 | else |
David Sterba | c746054 | 2020-05-29 15:47:05 +0200 | [diff] [blame] | 1956 | btrfs_dev_stat_inc_and_print(spage->dev, |
Stefan Behrens | 442a4f6 | 2012-05-25 16:06:08 +0200 | [diff] [blame] | 1957 | BTRFS_DEV_STAT_GENERATION_ERRS); |
Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 1958 | } |
| 1959 | |
Stefan Behrens | 442a4f6 | 2012-05-25 16:06:08 +0200 | [diff] [blame] | 1960 | return fail_cor + fail_gen; |
Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 1961 | } |
| 1962 | |
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1963 | static void scrub_block_get(struct scrub_block *sblock) |
| 1964 | { |
Elena Reshetova | 186debd | 2017-03-03 10:55:23 +0200 | [diff] [blame] | 1965 | refcount_inc(&sblock->refs); |
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1966 | } |
| 1967 | |
| 1968 | static void scrub_block_put(struct scrub_block *sblock) |
| 1969 | { |
Elena Reshetova | 186debd | 2017-03-03 10:55:23 +0200 | [diff] [blame] | 1970 | if (refcount_dec_and_test(&sblock->refs)) { |
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1971 | int i; |
| 1972 | |
Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 1973 | if (sblock->sparity) |
| 1974 | scrub_parity_put(sblock->sparity); |
| 1975 | |
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1976 | for (i = 0; i < sblock->page_count; i++) |
Stefan Behrens | 7a9e998 | 2012-11-02 14:58:04 +0100 | [diff] [blame] | 1977 | scrub_page_put(sblock->pagev[i]); |
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 1978 | kfree(sblock); |
| 1979 | } |
| 1980 | } |
| 1981 | |
Stefan Behrens | 7a9e998 | 2012-11-02 14:58:04 +0100 | [diff] [blame] | 1982 | static void scrub_page_get(struct scrub_page *spage) |
| 1983 | { |
Zhao Lei | 5701934 | 2015-01-20 15:11:45 +0800 | [diff] [blame] | 1984 | atomic_inc(&spage->refs); |
Stefan Behrens | 7a9e998 | 2012-11-02 14:58:04 +0100 | [diff] [blame] | 1985 | } |
| 1986 | |
| 1987 | static void scrub_page_put(struct scrub_page *spage) |
| 1988 | { |
Zhao Lei | 5701934 | 2015-01-20 15:11:45 +0800 | [diff] [blame] | 1989 | if (atomic_dec_and_test(&spage->refs)) { |
Stefan Behrens | 7a9e998 | 2012-11-02 14:58:04 +0100 | [diff] [blame] | 1990 | if (spage->page) |
| 1991 | __free_page(spage->page); |
| 1992 | kfree(spage); |
| 1993 | } |
| 1994 | } |
| 1995 | |
David Sterba | eb3b505 | 2019-10-09 13:58:13 +0200 | [diff] [blame] | 1996 | /* |
| 1997 | * Throttling of IO submission, bandwidth-limit based; the timeslice is 1 |
| 1998 | * second. The limit can be set via /sys/fs/btrfs/UUID/devinfo/devid/scrub_speed_max. |
| 1999 | */ |
| 2000 | static void scrub_throttle(struct scrub_ctx *sctx) |
| 2001 | { |
| 2002 | const int time_slice = 1000; |
| 2003 | struct scrub_bio *sbio; |
| 2004 | struct btrfs_device *device; |
| 2005 | s64 delta; |
| 2006 | ktime_t now; |
| 2007 | u32 div; |
| 2008 | u64 bwlimit; |
| 2009 | |
| 2010 | sbio = sctx->bios[sctx->curr]; |
| 2011 | device = sbio->dev; |
| 2012 | bwlimit = READ_ONCE(device->scrub_speed_max); |
| 2013 | if (bwlimit == 0) |
| 2014 | return; |
| 2015 | |
| 2016 | /* |
| 2017 | * The slice is divided into intervals in which the IO is submitted; the |
| 2018 | * interval count scales with bwlimit and is capped at 64. |
| 2019 | */ |
| 2020 | div = max_t(u32, 1, (u32)(bwlimit / (16 * 1024 * 1024))); |
| 2021 | div = min_t(u32, 64, div); |
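| | /* |
| | * E.g. a 128MiB/s limit yields div = 8: eight 125ms intervals, each |
| | * admitting 16MiB of IO before the submitter has to sleep. |
| | */ |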
| 2022 | |
| 2023 | /* Start new epoch, set deadline */ |
| 2024 | now = ktime_get(); |
| 2025 | if (sctx->throttle_deadline == 0) { |
| 2026 | sctx->throttle_deadline = ktime_add_ms(now, time_slice / div); |
| 2027 | sctx->throttle_sent = 0; |
| 2028 | } |
| 2029 | |
| 2030 | /* Still within the deadline to send? */ |
| 2031 | if (ktime_before(now, sctx->throttle_deadline)) { |
| 2032 | /* If current bio is within the limit, send it */ |
| 2033 | sctx->throttle_sent += sbio->bio->bi_iter.bi_size; |
| 2034 | if (sctx->throttle_sent <= div_u64(bwlimit, div)) |
| 2035 | return; |
| 2036 | |
| 2037 | /* We're over the limit: sleep for the rest of the slice */ |
| 2038 | delta = ktime_ms_delta(sctx->throttle_deadline, now); |
| 2039 | } else { |
| 2040 | /* New request after deadline, start new epoch */ |
| 2041 | delta = 0; |
| 2042 | } |
| 2043 | |
| 2044 | if (delta) { |
| 2045 | long timeout; |
| 2046 | |
| 2047 | timeout = div_u64(delta * HZ, 1000); |
| 2048 | schedule_timeout_interruptible(timeout); |
| 2049 | } |
| 2050 | |
| 2051 | /* Next call will start the deadline period */ |
| 2052 | sctx->throttle_deadline = 0; |
| 2053 | } |
| 2054 | |
Stefan Behrens | d9d181c | 2012-11-02 09:58:09 +0100 | [diff] [blame] | 2055 | static void scrub_submit(struct scrub_ctx *sctx) |
Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 2056 | { |
| 2057 | struct scrub_bio *sbio; |
| 2058 | |
Stefan Behrens | d9d181c | 2012-11-02 09:58:09 +0100 | [diff] [blame] | 2059 | if (sctx->curr == -1) |
Stefan Behrens | 1623ede | 2012-03-27 14:21:26 -0400 | [diff] [blame] | 2060 | return; |
Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 2061 | |
David Sterba | eb3b505 | 2019-10-09 13:58:13 +0200 | [diff] [blame] | 2062 | scrub_throttle(sctx); |
| 2063 | |
Stefan Behrens | d9d181c | 2012-11-02 09:58:09 +0100 | [diff] [blame] | 2064 | sbio = sctx->bios[sctx->curr]; |
| 2065 | sctx->curr = -1; |
Stefan Behrens | b6bfebc | 2012-11-02 16:44:58 +0100 | [diff] [blame] | 2066 | scrub_pending_bio_inc(sctx); |
Mike Christie | 4e49ea4 | 2016-06-05 14:31:41 -0500 | [diff] [blame] | 2067 | btrfsic_submit_bio(sbio->bio); |
Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 2068 | } |
| 2069 | |
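| | /* |
| | * Queue one page for reading. A new bio is started whenever the current |
| | * one is full or the page is not physically/logically contiguous with |
| | * it, or targets a different device. |
| | */ |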
Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 2070 | static int scrub_add_page_to_rd_bio(struct scrub_ctx *sctx, |
| 2071 | struct scrub_page *spage) |
Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 2072 | { |
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 2073 | struct scrub_block *sblock = spage->sblock; |
Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 2074 | struct scrub_bio *sbio; |
Qu Wenruo | 8df507c | 2021-04-22 19:02:46 +0800 | [diff] [blame] | 2075 | const u32 sectorsize = sctx->fs_info->sectorsize; |
Arne Jansen | 69f4cb5 | 2011-11-11 08:17:10 -0500 | [diff] [blame] | 2076 | int ret; |
Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 2077 | |
| 2078 | again: |
| 2079 | /* |
| 2080 | * grab a fresh bio or wait for one to become available |
| 2081 | */ |
Stefan Behrens | d9d181c | 2012-11-02 09:58:09 +0100 | [diff] [blame] | 2082 | while (sctx->curr == -1) { |
| 2083 | spin_lock(&sctx->list_lock); |
| 2084 | sctx->curr = sctx->first_free; |
| 2085 | if (sctx->curr != -1) { |
| 2086 | sctx->first_free = sctx->bios[sctx->curr]->next_free; |
| 2087 | sctx->bios[sctx->curr]->next_free = -1; |
| 2088 | sctx->bios[sctx->curr]->page_count = 0; |
| 2089 | spin_unlock(&sctx->list_lock); |
Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 2090 | } else { |
Stefan Behrens | d9d181c | 2012-11-02 09:58:09 +0100 | [diff] [blame] | 2091 | spin_unlock(&sctx->list_lock); |
| 2092 | wait_event(sctx->list_wait, sctx->first_free != -1); |
Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 2093 | } |
| 2094 | } |
Stefan Behrens | d9d181c | 2012-11-02 09:58:09 +0100 | [diff] [blame] | 2095 | sbio = sctx->bios[sctx->curr]; |
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 2096 | if (sbio->page_count == 0) { |
Arne Jansen | 69f4cb5 | 2011-11-11 08:17:10 -0500 | [diff] [blame] | 2097 | struct bio *bio; |
| 2098 | |
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 2099 | sbio->physical = spage->physical; |
| 2100 | sbio->logical = spage->logical; |
Stefan Behrens | a36cf8b | 2012-11-02 13:26:57 +0100 | [diff] [blame] | 2101 | sbio->dev = spage->dev; |
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 2102 | bio = sbio->bio; |
| 2103 | if (!bio) { |
Qu Wenruo | c3a3b19 | 2021-09-15 15:17:18 +0800 | [diff] [blame] | 2104 | bio = btrfs_bio_alloc(sctx->pages_per_rd_bio); |
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 2105 | sbio->bio = bio; |
| 2106 | } |
Arne Jansen | 69f4cb5 | 2011-11-11 08:17:10 -0500 | [diff] [blame] | 2107 | |
| 2108 | bio->bi_private = sbio; |
| 2109 | bio->bi_end_io = scrub_bio_end_io; |
Christoph Hellwig | 74d4699 | 2017-08-23 19:10:32 +0200 | [diff] [blame] | 2110 | bio_set_dev(bio, sbio->dev->bdev); |
Kent Overstreet | 4f024f3 | 2013-10-11 15:44:27 -0700 | [diff] [blame] | 2111 | bio->bi_iter.bi_sector = sbio->physical >> 9; |
David Sterba | ebcc326 | 2018-06-29 10:56:53 +0200 | [diff] [blame] | 2112 | bio->bi_opf = REQ_OP_READ; |
Christoph Hellwig | 4e4cbee | 2017-06-03 09:38:06 +0200 | [diff] [blame] | 2113 | sbio->status = 0; |
Qu Wenruo | 8df507c | 2021-04-22 19:02:46 +0800 | [diff] [blame] | 2114 | } else if (sbio->physical + sbio->page_count * sectorsize != |
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 2115 | spage->physical || |
Qu Wenruo | 8df507c | 2021-04-22 19:02:46 +0800 | [diff] [blame] | 2116 | sbio->logical + sbio->page_count * sectorsize != |
Stefan Behrens | a36cf8b | 2012-11-02 13:26:57 +0100 | [diff] [blame] | 2117 | spage->logical || |
| 2118 | sbio->dev != spage->dev) { |
Stefan Behrens | d9d181c | 2012-11-02 09:58:09 +0100 | [diff] [blame] | 2119 | scrub_submit(sctx); |
Arne Jansen | 69f4cb5 | 2011-11-11 08:17:10 -0500 | [diff] [blame] | 2120 | goto again; |
| 2121 | } |
| 2122 | |
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 2123 | sbio->pagev[sbio->page_count] = spage; |
Qu Wenruo | 8df507c | 2021-04-22 19:02:46 +0800 | [diff] [blame] | 2124 | ret = bio_add_page(sbio->bio, spage->page, sectorsize, 0); |
| 2125 | if (ret != sectorsize) { |
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 2126 | if (sbio->page_count < 1) { |
| 2127 | bio_put(sbio->bio); |
| 2128 | sbio->bio = NULL; |
| 2129 | return -EIO; |
| 2130 | } |
Stefan Behrens | d9d181c | 2012-11-02 09:58:09 +0100 | [diff] [blame] | 2131 | scrub_submit(sctx); |
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 2132 | goto again; |
Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 2133 | } |
Arne Jansen | 1bc8779 | 2011-05-28 21:57:55 +0200 | [diff] [blame] | 2134 | |
Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 2135 | scrub_block_get(sblock); /* one for the page added to the bio */ |
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 2136 | atomic_inc(&sblock->outstanding_pages); |
| 2137 | sbio->page_count++; |
Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 2138 | if (sbio->page_count == sctx->pages_per_rd_bio) |
Stefan Behrens | d9d181c | 2012-11-02 09:58:09 +0100 | [diff] [blame] | 2139 | scrub_submit(sctx); |
Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 2140 | |
| 2141 | return 0; |
| 2142 | } |
| 2143 | |
Linus Torvalds | 2236597 | 2015-09-05 15:14:43 -0700 | [diff] [blame] | 2144 | static void scrub_missing_raid56_end_io(struct bio *bio) |
Omar Sandoval | 73ff61d | 2015-06-19 11:52:51 -0700 | [diff] [blame] | 2145 | { |
| 2146 | struct scrub_block *sblock = bio->bi_private; |
Jeff Mahoney | fb45625 | 2016-06-22 18:54:56 -0400 | [diff] [blame] | 2147 | struct btrfs_fs_info *fs_info = sblock->sctx->fs_info; |
Omar Sandoval | 73ff61d | 2015-06-19 11:52:51 -0700 | [diff] [blame] | 2148 | |
Christoph Hellwig | 4e4cbee | 2017-06-03 09:38:06 +0200 | [diff] [blame] | 2149 | if (bio->bi_status) |
Omar Sandoval | 73ff61d | 2015-06-19 11:52:51 -0700 | [diff] [blame] | 2150 | sblock->no_io_error_seen = 0; |
| 2151 | |
Scott Talbert | 4673272 | 2016-05-09 09:14:28 -0400 | [diff] [blame] | 2152 | bio_put(bio); |
| 2153 | |
Omar Sandoval | 73ff61d | 2015-06-19 11:52:51 -0700 | [diff] [blame] | 2154 | btrfs_queue_work(fs_info->scrub_workers, &sblock->work); |
| 2155 | } |
| 2156 | |
| 2157 | static void scrub_missing_raid56_worker(struct btrfs_work *work) |
| 2158 | { |
| 2159 | struct scrub_block *sblock = container_of(work, struct scrub_block, work); |
| 2160 | struct scrub_ctx *sctx = sblock->sctx; |
Jeff Mahoney | 0b246af | 2016-06-22 18:54:23 -0400 | [diff] [blame] | 2161 | struct btrfs_fs_info *fs_info = sctx->fs_info; |
Omar Sandoval | 73ff61d | 2015-06-19 11:52:51 -0700 | [diff] [blame] | 2162 | u64 logical; |
| 2163 | struct btrfs_device *dev; |
| 2164 | |
Omar Sandoval | 73ff61d | 2015-06-19 11:52:51 -0700 | [diff] [blame] | 2165 | logical = sblock->pagev[0]->logical; |
| 2166 | dev = sblock->pagev[0]->dev; |
| 2167 | |
Zhao Lei | affe4a5 | 2015-08-24 21:32:06 +0800 | [diff] [blame] | 2168 | if (sblock->no_io_error_seen) |
Zhao Lei | ba7cf98 | 2015-08-24 21:18:02 +0800 | [diff] [blame] | 2169 | scrub_recheck_block_checksum(sblock); |
Omar Sandoval | 73ff61d | 2015-06-19 11:52:51 -0700 | [diff] [blame] | 2170 | |
| 2171 | if (!sblock->no_io_error_seen) { |
| 2172 | spin_lock(&sctx->stat_lock); |
| 2173 | sctx->stat.read_errors++; |
| 2174 | spin_unlock(&sctx->stat_lock); |
Jeff Mahoney | 0b246af | 2016-06-22 18:54:23 -0400 | [diff] [blame] | 2175 | btrfs_err_rl_in_rcu(fs_info, |
David Sterba | b14af3b | 2015-10-08 10:43:10 +0200 | [diff] [blame] | 2176 | "IO error rebuilding logical %llu for dev %s", |
Omar Sandoval | 73ff61d | 2015-06-19 11:52:51 -0700 | [diff] [blame] | 2177 | logical, rcu_str_deref(dev->name)); |
| 2178 | } else if (sblock->header_error || sblock->checksum_error) { |
| 2179 | spin_lock(&sctx->stat_lock); |
| 2180 | sctx->stat.uncorrectable_errors++; |
| 2181 | spin_unlock(&sctx->stat_lock); |
Jeff Mahoney | 0b246af | 2016-06-22 18:54:23 -0400 | [diff] [blame] | 2182 | btrfs_err_rl_in_rcu(fs_info, |
David Sterba | b14af3b | 2015-10-08 10:43:10 +0200 | [diff] [blame] | 2183 | "failed to rebuild valid logical %llu for dev %s", |
Omar Sandoval | 73ff61d | 2015-06-19 11:52:51 -0700 | [diff] [blame] | 2184 | logical, rcu_str_deref(dev->name)); |
| 2185 | } else { |
| 2186 | scrub_write_block_to_dev_replace(sblock); |
| 2187 | } |
| 2188 | |
David Sterba | 2073c4c | 2017-03-31 17:12:51 +0200 | [diff] [blame] | 2189 | if (sctx->is_dev_replace && sctx->flush_all_writes) { |
David Sterba | 3fb9930 | 2017-05-16 19:10:32 +0200 | [diff] [blame] | 2190 | mutex_lock(&sctx->wr_lock); |
Omar Sandoval | 73ff61d | 2015-06-19 11:52:51 -0700 | [diff] [blame] | 2191 | scrub_wr_submit(sctx); |
David Sterba | 3fb9930 | 2017-05-16 19:10:32 +0200 | [diff] [blame] | 2192 | mutex_unlock(&sctx->wr_lock); |
Omar Sandoval | 73ff61d | 2015-06-19 11:52:51 -0700 | [diff] [blame] | 2193 | } |
| 2194 | |
Omar Sandoval | 57d4f0b | 2019-09-16 11:30:56 -0700 | [diff] [blame] | 2195 | scrub_block_put(sblock); |
Omar Sandoval | 73ff61d | 2015-06-19 11:52:51 -0700 | [diff] [blame] | 2196 | scrub_pending_bio_dec(sctx); |
| 2197 | } |
| 2198 | |
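| | /* |
| | * Rebuild a block that resides on a missing device (RAID5/6 dev-replace |
| | * only) by handing its pages to the raid56 code, which reconstructs the |
| | * data from the remaining stripes. |
| | */ |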
| 2199 | static void scrub_missing_raid56_pages(struct scrub_block *sblock) |
| 2200 | { |
| 2201 | struct scrub_ctx *sctx = sblock->sctx; |
Jeff Mahoney | fb45625 | 2016-06-22 18:54:56 -0400 | [diff] [blame] | 2202 | struct btrfs_fs_info *fs_info = sctx->fs_info; |
Omar Sandoval | 73ff61d | 2015-06-19 11:52:51 -0700 | [diff] [blame] | 2203 | u64 length = sblock->page_count * PAGE_SIZE; |
| 2204 | u64 logical = sblock->pagev[0]->logical; |
Qu Wenruo | 4c66461 | 2021-09-15 15:17:16 +0800 | [diff] [blame] | 2205 | struct btrfs_io_context *bioc = NULL; |
Omar Sandoval | 73ff61d | 2015-06-19 11:52:51 -0700 | [diff] [blame] | 2206 | struct bio *bio; |
| 2207 | struct btrfs_raid_bio *rbio; |
| 2208 | int ret; |
| 2209 | int i; |
| 2210 | |
Qu Wenruo | ae6529c | 2017-03-29 09:33:21 +0800 | [diff] [blame] | 2211 | btrfs_bio_counter_inc_blocked(fs_info); |
Christoph Hellwig | cf8cddd | 2016-10-27 09:27:36 +0200 | [diff] [blame] | 2212 | ret = btrfs_map_sblock(fs_info, BTRFS_MAP_GET_READ_MIRRORS, logical, |
Qu Wenruo | 4c66461 | 2021-09-15 15:17:16 +0800 | [diff] [blame] | 2213 | &length, &bioc); |
| 2214 | if (ret || !bioc || !bioc->raid_map) |
| 2215 | goto bioc_out; |
Omar Sandoval | 73ff61d | 2015-06-19 11:52:51 -0700 | [diff] [blame] | 2216 | |
| 2217 | if (WARN_ON(!sctx->is_dev_replace || |
Qu Wenruo | 4c66461 | 2021-09-15 15:17:16 +0800 | [diff] [blame] | 2218 | !(bioc->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK))) { |
Omar Sandoval | 73ff61d | 2015-06-19 11:52:51 -0700 | [diff] [blame] | 2219 | /* |
| 2220 | * We shouldn't be scrubbing a missing device. Even for dev |
| 2221 | * replace, we should only get here for RAID 5/6. We either |
| 2222 | * managed to mount something with no mirrors remaining or |
| 2223 | * there's a bug in scrub_remap_extent()/btrfs_map_block(). |
| 2224 | */ |
Qu Wenruo | 4c66461 | 2021-09-15 15:17:16 +0800 | [diff] [blame] | 2225 | goto bioc_out; |
Omar Sandoval | 73ff61d | 2015-06-19 11:52:51 -0700 | [diff] [blame] | 2226 | } |
| 2227 | |
Qu Wenruo | c3a3b19 | 2021-09-15 15:17:18 +0800 | [diff] [blame] | 2228 | bio = btrfs_bio_alloc(BIO_MAX_VECS); |
Omar Sandoval | 73ff61d | 2015-06-19 11:52:51 -0700 | [diff] [blame] | 2229 | bio->bi_iter.bi_sector = logical >> 9; |
| 2230 | bio->bi_private = sblock; |
| 2231 | bio->bi_end_io = scrub_missing_raid56_end_io; |
| 2232 | |
Qu Wenruo | 6a258d7 | 2021-09-23 14:00:09 +0800 | [diff] [blame] | 2233 | rbio = raid56_alloc_missing_rbio(bio, bioc, length); |
Omar Sandoval | 73ff61d | 2015-06-19 11:52:51 -0700 | [diff] [blame] | 2234 | if (!rbio) |
| 2235 | goto rbio_out; |
| 2236 | |
| 2237 | for (i = 0; i < sblock->page_count; i++) { |
| 2238 | struct scrub_page *spage = sblock->pagev[i]; |
| 2239 | |
| 2240 | raid56_add_scrub_pages(rbio, spage->page, spage->logical); |
| 2241 | } |
| 2242 | |
Omar Sandoval | a0cac0e | 2019-09-16 11:30:57 -0700 | [diff] [blame] | 2243 | btrfs_init_work(&sblock->work, scrub_missing_raid56_worker, NULL, NULL); |
Omar Sandoval | 73ff61d | 2015-06-19 11:52:51 -0700 | [diff] [blame] | 2244 | scrub_block_get(sblock); |
| 2245 | scrub_pending_bio_inc(sctx); |
| 2246 | raid56_submit_missing_rbio(rbio); |
| 2247 | return; |
| 2248 | |
| 2249 | rbio_out: |
| 2250 | bio_put(bio); |
Qu Wenruo | 4c66461 | 2021-09-15 15:17:16 +0800 | [diff] [blame] | 2251 | bioc_out: |
Qu Wenruo | ae6529c | 2017-03-29 09:33:21 +0800 | [diff] [blame] | 2252 | btrfs_bio_counter_dec(fs_info); |
Qu Wenruo | 4c66461 | 2021-09-15 15:17:16 +0800 | [diff] [blame] | 2253 | btrfs_put_bioc(bioc); |
Omar Sandoval | 73ff61d | 2015-06-19 11:52:51 -0700 | [diff] [blame] | 2254 | spin_lock(&sctx->stat_lock); |
| 2255 | sctx->stat.malloc_errors++; |
| 2256 | spin_unlock(&sctx->stat_lock); |
| 2257 | } |
| 2258 | |
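| | /* |
| | * Split the range into sectorsize pages, attach them to a freshly |
| | * allocated scrub_block and queue each page for reading; blocks on a |
| | * missing device are rebuilt via scrub_missing_raid56_pages() instead. |
| | */ |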
Qu Wenruo | fa485d2 | 2020-12-02 14:48:07 +0800 | [diff] [blame] | 2259 | static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u32 len, |
Stefan Behrens | a36cf8b | 2012-11-02 13:26:57 +0100 | [diff] [blame] | 2260 | u64 physical, struct btrfs_device *dev, u64 flags, |
Qu Wenruo | 96e63a4 | 2020-11-03 21:31:02 +0800 | [diff] [blame] | 2261 | u64 gen, int mirror_num, u8 *csum, |
Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 2262 | u64 physical_for_dev_replace) |
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 2263 | { |
| 2264 | struct scrub_block *sblock; |
Qu Wenruo | d0a7a9c | 2020-12-02 14:48:08 +0800 | [diff] [blame] | 2265 | const u32 sectorsize = sctx->fs_info->sectorsize; |
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 2266 | int index; |
| 2267 | |
David Sterba | 58c4e17 | 2016-02-11 10:49:42 +0100 | [diff] [blame] | 2268 | sblock = kzalloc(sizeof(*sblock), GFP_KERNEL); |
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 2269 | if (!sblock) { |
Stefan Behrens | d9d181c | 2012-11-02 09:58:09 +0100 | [diff] [blame] | 2270 | spin_lock(&sctx->stat_lock); |
| 2271 | sctx->stat.malloc_errors++; |
| 2272 | spin_unlock(&sctx->stat_lock); |
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 2273 | return -ENOMEM; |
| 2274 | } |
| 2275 | |
Stefan Behrens | 7a9e998 | 2012-11-02 14:58:04 +0100 | [diff] [blame] | 2276 | /* one ref inside this function, plus one for each page added to |
| 2277 | * a bio later on */ |
Elena Reshetova | 186debd | 2017-03-03 10:55:23 +0200 | [diff] [blame] | 2278 | refcount_set(&sblock->refs, 1); |
Stefan Behrens | d9d181c | 2012-11-02 09:58:09 +0100 | [diff] [blame] | 2279 | sblock->sctx = sctx; |
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 2280 | sblock->no_io_error_seen = 1; |
| 2281 | |
| 2282 | for (index = 0; len > 0; index++) { |
Stefan Behrens | 7a9e998 | 2012-11-02 14:58:04 +0100 | [diff] [blame] | 2283 | struct scrub_page *spage; |
Qu Wenruo | d0a7a9c | 2020-12-02 14:48:08 +0800 | [diff] [blame] | 2284 | /* |
| 2285 | * Here we will allocate one page for one sector to scrub. |
| 2286 | * This is fine if PAGE_SIZE == sectorsize, but will cost |
| 2287 | * more memory in the PAGE_SIZE > sectorsize case. |
| 2288 | */ |
| 2289 | u32 l = min(sectorsize, len); |
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 2290 | |
David Sterba | 58c4e17 | 2016-02-11 10:49:42 +0100 | [diff] [blame] | 2291 | spage = kzalloc(sizeof(*spage), GFP_KERNEL); |
Stefan Behrens | 7a9e998 | 2012-11-02 14:58:04 +0100 | [diff] [blame] | 2292 | if (!spage) { |
| 2293 | leave_nomem: |
Stefan Behrens | d9d181c | 2012-11-02 09:58:09 +0100 | [diff] [blame] | 2294 | spin_lock(&sctx->stat_lock); |
| 2295 | sctx->stat.malloc_errors++; |
| 2296 | spin_unlock(&sctx->stat_lock); |
Stefan Behrens | 7a9e998 | 2012-11-02 14:58:04 +0100 | [diff] [blame] | 2297 | scrub_block_put(sblock); |
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 2298 | return -ENOMEM; |
| 2299 | } |
Stefan Behrens | 7a9e998 | 2012-11-02 14:58:04 +0100 | [diff] [blame] | 2300 | BUG_ON(index >= SCRUB_MAX_PAGES_PER_BLOCK); |
| 2301 | scrub_page_get(spage); |
| 2302 | sblock->pagev[index] = spage; |
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 2303 | spage->sblock = sblock; |
Stefan Behrens | a36cf8b | 2012-11-02 13:26:57 +0100 | [diff] [blame] | 2304 | spage->dev = dev; |
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 2305 | spage->flags = flags; |
| 2306 | spage->generation = gen; |
| 2307 | spage->logical = logical; |
| 2308 | spage->physical = physical; |
Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 2309 | spage->physical_for_dev_replace = physical_for_dev_replace; |
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 2310 | spage->mirror_num = mirror_num; |
| 2311 | if (csum) { |
| 2312 | spage->have_csum = 1; |
David Sterba | 2ae0c2d | 2020-06-30 17:44:49 +0200 | [diff] [blame] | 2313 | memcpy(spage->csum, csum, sctx->fs_info->csum_size); |
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 2314 | } else { |
| 2315 | spage->have_csum = 0; |
| 2316 | } |
| 2317 | sblock->page_count++; |
David Sterba | 58c4e17 | 2016-02-11 10:49:42 +0100 | [diff] [blame] | 2318 | spage->page = alloc_page(GFP_KERNEL); |
Stefan Behrens | 7a9e998 | 2012-11-02 14:58:04 +0100 | [diff] [blame] | 2319 | if (!spage->page) |
| 2320 | goto leave_nomem; |
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 2321 | len -= l; |
| 2322 | logical += l; |
| 2323 | physical += l; |
Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 2324 | physical_for_dev_replace += l; |
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 2325 | } |
| 2326 | |
Stefan Behrens | 7a9e998 | 2012-11-02 14:58:04 +0100 | [diff] [blame] | 2327 | WARN_ON(sblock->page_count == 0); |
Anand Jain | e6e674b | 2017-12-04 12:54:54 +0800 | [diff] [blame] | 2328 | if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state)) { |
Omar Sandoval | 73ff61d | 2015-06-19 11:52:51 -0700 | [diff] [blame] | 2329 | /* |
| 2330 | * This case should only be hit for RAID 5/6 device replace. See |
| 2331 | * the comment in scrub_missing_raid56_pages() for details. |
| 2332 | */ |
| 2333 | scrub_missing_raid56_pages(sblock); |
| 2334 | } else { |
| 2335 | for (index = 0; index < sblock->page_count; index++) { |
| 2336 | struct scrub_page *spage = sblock->pagev[index]; |
| 2337 | int ret; |
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 2338 | |
Omar Sandoval | 73ff61d | 2015-06-19 11:52:51 -0700 | [diff] [blame] | 2339 | ret = scrub_add_page_to_rd_bio(sctx, spage); |
| 2340 | if (ret) { |
| 2341 | scrub_block_put(sblock); |
| 2342 | return ret; |
| 2343 | } |
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 2344 | } |
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 2345 | |
Qu Wenruo | 96e63a4 | 2020-11-03 21:31:02 +0800 | [diff] [blame] | 2346 | if (flags & BTRFS_EXTENT_FLAG_SUPER) |
Omar Sandoval | 73ff61d | 2015-06-19 11:52:51 -0700 | [diff] [blame] | 2347 | scrub_submit(sctx); |
| 2348 | } |
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 2349 | |
| 2350 | /* The last reference frees the block, either here or in the bio completion of the last page */ |
| 2351 | scrub_block_put(sblock); |
| 2352 | return 0; |
| 2353 | } |
| 2354 | |
Christoph Hellwig | 4246a0b | 2015-07-20 15:29:37 +0200 | [diff] [blame] | 2355 | static void scrub_bio_end_io(struct bio *bio) |
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 2356 | { |
| 2357 | struct scrub_bio *sbio = bio->bi_private; |
Jeff Mahoney | fb45625 | 2016-06-22 18:54:56 -0400 | [diff] [blame] | 2358 | struct btrfs_fs_info *fs_info = sbio->dev->fs_info; |
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 2359 | |
Christoph Hellwig | 4e4cbee | 2017-06-03 09:38:06 +0200 | [diff] [blame] | 2360 | sbio->status = bio->bi_status; |
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 2361 | sbio->bio = bio; |
| 2362 | |
Qu Wenruo | 0339ef2 | 2014-02-28 10:46:17 +0800 | [diff] [blame] | 2363 | btrfs_queue_work(fs_info->scrub_workers, &sbio->work); |
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 2364 | } |
| 2365 | |
| 2366 | static void scrub_bio_end_io_worker(struct btrfs_work *work) |
| 2367 | { |
| 2368 | struct scrub_bio *sbio = container_of(work, struct scrub_bio, work); |
Stefan Behrens | d9d181c | 2012-11-02 09:58:09 +0100 | [diff] [blame] | 2369 | struct scrub_ctx *sctx = sbio->sctx; |
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 2370 | int i; |
| 2371 | |
Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 2372 | BUG_ON(sbio->page_count > SCRUB_PAGES_PER_RD_BIO); |
Christoph Hellwig | 4e4cbee | 2017-06-03 09:38:06 +0200 | [diff] [blame] | 2373 | if (sbio->status) { |
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 2374 | for (i = 0; i < sbio->page_count; i++) { |
| 2375 | struct scrub_page *spage = sbio->pagev[i]; |
| 2376 | |
| 2377 | spage->io_error = 1; |
| 2378 | spage->sblock->no_io_error_seen = 0; |
| 2379 | } |
| 2380 | } |
| 2381 | |
| 2382 | /* now complete the scrub_block items that have all pages completed */ |
| 2383 | for (i = 0; i < sbio->page_count; i++) { |
| 2384 | struct scrub_page *spage = sbio->pagev[i]; |
| 2385 | struct scrub_block *sblock = spage->sblock; |
| 2386 | |
| 2387 | if (atomic_dec_and_test(&sblock->outstanding_pages)) |
| 2388 | scrub_block_complete(sblock); |
| 2389 | scrub_block_put(sblock); |
| 2390 | } |
| 2391 | |
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 2392 | bio_put(sbio->bio); |
| 2393 | sbio->bio = NULL; |
Stefan Behrens | d9d181c | 2012-11-02 09:58:09 +0100 | [diff] [blame] | 2394 | spin_lock(&sctx->list_lock); |
| 2395 | sbio->next_free = sctx->first_free; |
| 2396 | sctx->first_free = sbio->index; |
| 2397 | spin_unlock(&sctx->list_lock); |
Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 2398 | |
David Sterba | 2073c4c | 2017-03-31 17:12:51 +0200 | [diff] [blame] | 2399 | if (sctx->is_dev_replace && sctx->flush_all_writes) { |
David Sterba | 3fb9930 | 2017-05-16 19:10:32 +0200 | [diff] [blame] | 2400 | mutex_lock(&sctx->wr_lock); |
Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 2401 | scrub_wr_submit(sctx); |
David Sterba | 3fb9930 | 2017-05-16 19:10:32 +0200 | [diff] [blame] | 2402 | mutex_unlock(&sctx->wr_lock); |
Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 2403 | } |
| 2404 | |
Stefan Behrens | b6bfebc | 2012-11-02 16:44:58 +0100 | [diff] [blame] | 2405 | scrub_pending_bio_dec(sctx); |
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 2406 | } |
| 2407 | |
Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 2408 | static inline void __scrub_mark_bitmap(struct scrub_parity *sparity, |
| 2409 | unsigned long *bitmap, |
Qu Wenruo | fa485d2 | 2020-12-02 14:48:07 +0800 | [diff] [blame] | 2410 | u64 start, u32 len) |
Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 2411 | { |
Liu Bo | 972d721 | 2017-04-03 13:45:33 -0700 | [diff] [blame] | 2412 | u64 offset; |
David Sterba | 7736b0a | 2017-03-31 18:02:48 +0200 | [diff] [blame] | 2413 | u32 nsectors; |
David Sterba | ab108d9 | 2020-07-01 20:45:04 +0200 | [diff] [blame] | 2414 | u32 sectorsize_bits = sparity->sctx->fs_info->sectorsize_bits; |
Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 2415 | |
| 2416 | if (len >= sparity->stripe_len) { |
| 2417 | bitmap_set(bitmap, 0, sparity->nsectors); |
| 2418 | return; |
| 2419 | } |
| 2420 | |
| 2421 | start -= sparity->logic_start; |
Liu Bo | 972d721 | 2017-04-03 13:45:33 -0700 | [diff] [blame] | 2422 | start = div64_u64_rem(start, sparity->stripe_len, &offset); |
David Sterba | ab108d9 | 2020-07-01 20:45:04 +0200 | [diff] [blame] | 2423 | offset = offset >> sectorsize_bits; |
Qu Wenruo | fa485d2 | 2020-12-02 14:48:07 +0800 | [diff] [blame] | 2424 | nsectors = len >> sectorsize_bits; |
Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 2425 | |
| 2426 | if (offset + nsectors <= sparity->nsectors) { |
| 2427 | bitmap_set(bitmap, offset, nsectors); |
| 2428 | return; |
| 2429 | } |
| 2430 | |
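| | /* The range wraps past the end of the stripe: mark the tail, then the wrapped head. */ |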
| 2431 | bitmap_set(bitmap, offset, sparity->nsectors - offset); |
| 2432 | bitmap_set(bitmap, 0, nsectors - (sparity->nsectors - offset)); |
| 2433 | } |
| 2434 | |
| 2435 | static inline void scrub_parity_mark_sectors_error(struct scrub_parity *sparity, |
Qu Wenruo | fa485d2 | 2020-12-02 14:48:07 +0800 | [diff] [blame] | 2436 | u64 start, u32 len) |
Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 2437 | { |
| 2438 | __scrub_mark_bitmap(sparity, sparity->ebitmap, start, len); |
| 2439 | } |
| 2440 | |
| 2441 | static inline void scrub_parity_mark_sectors_data(struct scrub_parity *sparity, |
Qu Wenruo | fa485d2 | 2020-12-02 14:48:07 +0800 | [diff] [blame] | 2442 | u64 start, u32 len) |
Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 2443 | { |
| 2444 | __scrub_mark_bitmap(sparity, sparity->dbitmap, start, len); |
| 2445 | } |
| 2446 | |
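| | /* Called once all pages of the block have completed their IO. */ |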
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 2447 | static void scrub_block_complete(struct scrub_block *sblock) |
| 2448 | { |
Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 2449 | int corrupted = 0; |
| 2450 | |
Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 2451 | if (!sblock->no_io_error_seen) { |
Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 2452 | corrupted = 1; |
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 2453 | scrub_handle_errored_block(sblock); |
Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 2454 | } else { |
| 2455 | /* |
| 2456 | * In the dev-replace case: if the block has a checksum error it is |
| 2457 | * written via the repair mechanism, otherwise it is written here |
| 2458 | * directly. |
| 2459 | */ |
Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 2460 | corrupted = scrub_checksum(sblock); |
| 2461 | if (!corrupted && sblock->sctx->is_dev_replace) |
Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 2462 | scrub_write_block_to_dev_replace(sblock); |
| 2463 | } |
Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 2464 | |
| 2465 | if (sblock->sparity && corrupted && !sblock->data_corrected) { |
| 2466 | u64 start = sblock->pagev[0]->logical; |
| 2467 | u64 end = sblock->pagev[sblock->page_count - 1]->logical + |
Qu Wenruo | 8df507c | 2021-04-22 19:02:46 +0800 | [diff] [blame] | 2468 | sblock->sctx->fs_info->sectorsize; |
Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 2469 | |
Qu Wenruo | fa485d2 | 2020-12-02 14:48:07 +0800 | [diff] [blame] | 2470 | ASSERT(end - start <= U32_MAX); |
Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 2471 | scrub_parity_mark_sectors_error(sblock->sparity, |
| 2472 | start, end - start); |
| 2473 | } |
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 2474 | } |
| 2475 | |
Qu Wenruo | 480a8ec | 2020-11-03 21:31:04 +0800 | [diff] [blame] | 2476 | static void drop_csum_range(struct scrub_ctx *sctx, struct btrfs_ordered_sum *sum) |
| 2477 | { |
| 2478 | sctx->stat.csum_discards += sum->len >> sctx->fs_info->sectorsize_bits; |
| 2479 | list_del(&sum->list); |
| 2480 | kfree(sum); |
| 2481 | } |
| 2482 | |
| 2483 | /* |
| 2484 | * Find the desired csum for range [logical, logical + sectorsize), and store |
| 2485 | * the csum into @csum. |
| 2486 | * |
| 2487 | * The search source is sctx->csum_list, which is a pre-populated list |
David Sterba | 1a9fd41 | 2021-05-21 17:42:23 +0200 | [diff] [blame] | 2488 | * storing bytenr-ordered csum ranges. We're responsible for cleaning up any range |
Qu Wenruo | 480a8ec | 2020-11-03 21:31:04 +0800 | [diff] [blame] | 2489 | * that is before @logical. |
| 2490 | * |
| 2491 | * Return 0 if there is no csum for the range. |
| 2492 | * Return 1 if there is csum for the range and copied to @csum. |
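| | * |
| | * For example, with a 4KiB sectorsize, a @logical 8KiB past sum->bytenr |
| | * in a 16KiB csum range maps to index 2 of the 4 stored csums. |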
| 2493 | */ |
Zhao Lei | 3b5753e | 2015-08-24 22:03:02 +0800 | [diff] [blame] | 2494 | static int scrub_find_csum(struct scrub_ctx *sctx, u64 logical, u8 *csum) |
Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 2495 | { |
Qu Wenruo | 480a8ec | 2020-11-03 21:31:04 +0800 | [diff] [blame] | 2496 | bool found = false; |
Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 2497 | |
Stefan Behrens | d9d181c | 2012-11-02 09:58:09 +0100 | [diff] [blame] | 2498 | while (!list_empty(&sctx->csum_list)) { |
Qu Wenruo | 480a8ec | 2020-11-03 21:31:04 +0800 | [diff] [blame] | 2499 | struct btrfs_ordered_sum *sum = NULL; |
| 2500 | unsigned long index; |
| 2501 | unsigned long num_sectors; |
| 2502 | |
Stefan Behrens | d9d181c | 2012-11-02 09:58:09 +0100 | [diff] [blame] | 2503 | sum = list_first_entry(&sctx->csum_list, |
Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 2504 | struct btrfs_ordered_sum, list); |
Qu Wenruo | 480a8ec | 2020-11-03 21:31:04 +0800 | [diff] [blame] | 2505 | /* The current csum range is beyond our range, no csum found */ |
Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 2506 | if (sum->bytenr > logical) |
Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 2507 | break; |
| 2508 | |
Qu Wenruo | 480a8ec | 2020-11-03 21:31:04 +0800 | [diff] [blame] | 2509 | /* |
| 2510 | * The current sum is before our bytenr. Since scrub is always |
| 2511 | * done in bytenr order, this csum will never be used again; |
| 2512 | * clean it up so that later calls won't bother with the range, |
| 2513 | * and continue searching the next range. |
| 2514 | */ |
| 2515 | if (sum->bytenr + sum->len <= logical) { |
| 2516 | drop_csum_range(sctx, sum); |
| 2517 | continue; |
| 2518 | } |
| 2519 | |
| 2520 | /* Now the csum range covers our bytenr, copy the csum */ |
| 2521 | found = true; |
| 2522 | index = (logical - sum->bytenr) >> sctx->fs_info->sectorsize_bits; |
| 2523 | num_sectors = sum->len >> sctx->fs_info->sectorsize_bits; |
| 2524 | |
| 2525 | memcpy(csum, sum->sums + index * sctx->fs_info->csum_size, |
| 2526 | sctx->fs_info->csum_size); |
| 2527 | |
| 2528 | /* Cleanup the range if we're at the end of the csum range */ |
| 2529 | if (index == num_sectors - 1) |
| 2530 | drop_csum_range(sctx, sum); |
| 2531 | break; |
Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 2532 | } |
Qu Wenruo | 480a8ec | 2020-11-03 21:31:04 +0800 | [diff] [blame] | 2533 | if (!found) |
Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 2534 | return 0; |
Miao Xie | f51a4a1 | 2013-06-19 10:36:09 +0800 | [diff] [blame] | 2535 | return 1; |
Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 2536 | } |
| 2537 | |
| 2538 | /* scrub extent tries to collect up to 64 kB for each bio */ |
Liu Bo | 6ca1765 | 2018-03-07 12:08:09 -0700 | [diff] [blame] | 2539 | static int scrub_extent(struct scrub_ctx *sctx, struct map_lookup *map, |
Qu Wenruo | fa485d2 | 2020-12-02 14:48:07 +0800 | [diff] [blame] | 2540 | u64 logical, u32 len, |
Stefan Behrens | a36cf8b | 2012-11-02 13:26:57 +0100 | [diff] [blame] | 2541 | u64 physical, struct btrfs_device *dev, u64 flags, |
Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 2542 | u64 gen, int mirror_num, u64 physical_for_dev_replace) |
Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 2543 | { |
| 2544 | int ret; |
| 2545 | u8 csum[BTRFS_CSUM_SIZE]; |
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 2546 | u32 blocksize; |
| 2547 | |
| 2548 | if (flags & BTRFS_EXTENT_FLAG_DATA) { |
Liu Bo | 6ca1765 | 2018-03-07 12:08:09 -0700 | [diff] [blame] | 2549 | if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) |
| 2550 | blocksize = map->stripe_len; |
| 2551 | else |
| 2552 | blocksize = sctx->fs_info->sectorsize; |
Stefan Behrens | d9d181c | 2012-11-02 09:58:09 +0100 | [diff] [blame] | 2553 | spin_lock(&sctx->stat_lock); |
| 2554 | sctx->stat.data_extents_scrubbed++; |
| 2555 | sctx->stat.data_bytes_scrubbed += len; |
| 2556 | spin_unlock(&sctx->stat_lock); |
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 2557 | } else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) { |
Liu Bo | 6ca1765 | 2018-03-07 12:08:09 -0700 | [diff] [blame] | 2558 | if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) |
| 2559 | blocksize = map->stripe_len; |
| 2560 | else |
| 2561 | blocksize = sctx->fs_info->nodesize; |
Stefan Behrens | d9d181c | 2012-11-02 09:58:09 +0100 | [diff] [blame] | 2562 | spin_lock(&sctx->stat_lock); |
| 2563 | sctx->stat.tree_extents_scrubbed++; |
| 2564 | sctx->stat.tree_bytes_scrubbed += len; |
| 2565 | spin_unlock(&sctx->stat_lock); |
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 2566 | } else { |
David Sterba | 25cc122 | 2017-05-16 19:10:41 +0200 | [diff] [blame] | 2567 | blocksize = sctx->fs_info->sectorsize; |
Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 2568 | WARN_ON(1); |
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 2569 | } |
Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 2570 | |
| 2571 | while (len) { |
Qu Wenruo | fa485d2 | 2020-12-02 14:48:07 +0800 | [diff] [blame] | 2572 | u32 l = min(len, blocksize); |
Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 2573 | int have_csum = 0; |
| 2574 | |
| 2575 | if (flags & BTRFS_EXTENT_FLAG_DATA) { |
| 2576 | /* push csums to sbio */ |
Zhao Lei | 3b5753e | 2015-08-24 22:03:02 +0800 | [diff] [blame] | 2577 | have_csum = scrub_find_csum(sctx, logical, csum); |
Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 2578 | if (have_csum == 0) |
Stefan Behrens | d9d181c | 2012-11-02 09:58:09 +0100 | [diff] [blame] | 2579 | ++sctx->stat.no_csum; |
Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 2580 | } |
Stefan Behrens | a36cf8b | 2012-11-02 13:26:57 +0100 | [diff] [blame] | 2581 | ret = scrub_pages(sctx, logical, l, physical, dev, flags, gen, |
Qu Wenruo | 96e63a4 | 2020-11-03 21:31:02 +0800 | [diff] [blame] | 2582 | mirror_num, have_csum ? csum : NULL, |
Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 2583 | physical_for_dev_replace); |
Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 2584 | if (ret) |
| 2585 | return ret; |
| 2586 | len -= l; |
| 2587 | logical += l; |
| 2588 | physical += l; |
Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 2589 | physical_for_dev_replace += l; |
Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 2590 | } |
| 2591 | return 0; |
| 2592 | } |
| 2593 | |
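| | /* |
| | * Like scrub_pages(), but each page also takes a reference for the |
| | * scrub_parity and is linked into its spages list so the RAID5/6 parity |
| | * check can reuse it. |
| | */ |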
Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 2594 | static int scrub_pages_for_parity(struct scrub_parity *sparity, |
Qu Wenruo | fa485d2 | 2020-12-02 14:48:07 +0800 | [diff] [blame] | 2595 | u64 logical, u32 len, |
Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 2596 | u64 physical, struct btrfs_device *dev, |
| 2597 | u64 flags, u64 gen, int mirror_num, u8 *csum) |
| 2598 | { |
| 2599 | struct scrub_ctx *sctx = sparity->sctx; |
| 2600 | struct scrub_block *sblock; |
Qu Wenruo | d0a7a9c | 2020-12-02 14:48:08 +0800 | [diff] [blame] | 2601 | const u32 sectorsize = sctx->fs_info->sectorsize; |
Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 2602 | int index; |
| 2603 | |
Qu Wenruo | d0a7a9c | 2020-12-02 14:48:08 +0800 | [diff] [blame] | 2604 | ASSERT(IS_ALIGNED(len, sectorsize)); |
| 2605 | |
David Sterba | 58c4e17 | 2016-02-11 10:49:42 +0100 | [diff] [blame] | 2606 | sblock = kzalloc(sizeof(*sblock), GFP_KERNEL); |
Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 2607 | if (!sblock) { |
| 2608 | spin_lock(&sctx->stat_lock); |
| 2609 | sctx->stat.malloc_errors++; |
| 2610 | spin_unlock(&sctx->stat_lock); |
| 2611 | return -ENOMEM; |
| 2612 | } |
| 2613 | |
| 2614 | /* one ref inside this function, plus one for each page added to |
| 2615 | * a bio later on */ |
Elena Reshetova | 186debd | 2017-03-03 10:55:23 +0200 | [diff] [blame] | 2616 | refcount_set(&sblock->refs, 1); |
Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 2617 | sblock->sctx = sctx; |
| 2618 | sblock->no_io_error_seen = 1; |
| 2619 | sblock->sparity = sparity; |
| 2620 | scrub_parity_get(sparity); |
| 2621 | |
| 2622 | for (index = 0; len > 0; index++) { |
| 2623 | struct scrub_page *spage; |
Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 2624 | |
David Sterba | 58c4e17 | 2016-02-11 10:49:42 +0100 | [diff] [blame] | 2625 | spage = kzalloc(sizeof(*spage), GFP_KERNEL); |
Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 2626 | if (!spage) { |
| 2627 | leave_nomem: |
| 2628 | spin_lock(&sctx->stat_lock); |
| 2629 | sctx->stat.malloc_errors++; |
| 2630 | spin_unlock(&sctx->stat_lock); |
| 2631 | scrub_block_put(sblock); |
| 2632 | return -ENOMEM; |
| 2633 | } |
| 2634 | BUG_ON(index >= SCRUB_MAX_PAGES_PER_BLOCK); |
| 2635 | /* For scrub block */ |
| 2636 | scrub_page_get(spage); |
| 2637 | sblock->pagev[index] = spage; |
| 2638 | /* For scrub parity */ |
| 2639 | scrub_page_get(spage); |
| 2640 | list_add_tail(&spage->list, &sparity->spages); |
| 2641 | spage->sblock = sblock; |
| 2642 | spage->dev = dev; |
| 2643 | spage->flags = flags; |
| 2644 | spage->generation = gen; |
| 2645 | spage->logical = logical; |
| 2646 | spage->physical = physical; |
| 2647 | spage->mirror_num = mirror_num; |
| 2648 | if (csum) { |
| 2649 | spage->have_csum = 1; |
David Sterba | 2ae0c2d | 2020-06-30 17:44:49 +0200 | [diff] [blame] | 2650 | memcpy(spage->csum, csum, sctx->fs_info->csum_size); |
Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 2651 | } else { |
| 2652 | spage->have_csum = 0; |
| 2653 | } |
| 2654 | sblock->page_count++; |
David Sterba | 58c4e17 | 2016-02-11 10:49:42 +0100 | [diff] [blame] | 2655 | spage->page = alloc_page(GFP_KERNEL); |
Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 2656 | if (!spage->page) |
| 2657 | goto leave_nomem; |
Qu Wenruo | d0a7a9c | 2020-12-02 14:48:08 +0800 | [diff] [blame] | 2658 | |
| 2660 | /* Iterate over the stripe range in sectorsize steps */ |
| 2661 | len -= sectorsize; |
| 2662 | logical += sectorsize; |
| 2663 | physical += sectorsize; |
Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 2664 | } |
| 2665 | |
| 2666 | WARN_ON(sblock->page_count == 0); |
| 2667 | for (index = 0; index < sblock->page_count; index++) { |
| 2668 | struct scrub_page *spage = sblock->pagev[index]; |
| 2669 | int ret; |
| 2670 | |
| 2671 | ret = scrub_add_page_to_rd_bio(sctx, spage); |
| 2672 | if (ret) { |
| 2673 | scrub_block_put(sblock); |
| 2674 | return ret; |
| 2675 | } |
| 2676 | } |
| 2677 | |
| 2678 | /* last one frees, either here or in bio completion for last page */ |
| 2679 | scrub_block_put(sblock); |
| 2680 | return 0; |
| 2681 | } |
| 2682 | |
| 2683 | static int scrub_extent_for_parity(struct scrub_parity *sparity, |
Qu Wenruo | fa485d2 | 2020-12-02 14:48:07 +0800 | [diff] [blame] | 2684 | u64 logical, u32 len, |
Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 2685 | u64 physical, struct btrfs_device *dev, |
| 2686 | u64 flags, u64 gen, int mirror_num) |
| 2687 | { |
| 2688 | struct scrub_ctx *sctx = sparity->sctx; |
| 2689 | int ret; |
| 2690 | u8 csum[BTRFS_CSUM_SIZE]; |
| 2691 | u32 blocksize; |
| 2692 | |
Anand Jain | e6e674b | 2017-12-04 12:54:54 +0800 | [diff] [blame] | 2693 | if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state)) { |
Omar Sandoval | 4a77089 | 2015-06-19 11:52:52 -0700 | [diff] [blame] | 2694 | scrub_parity_mark_sectors_error(sparity, logical, len); |
| 2695 | return 0; |
| 2696 | } |
| 2697 | |
Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 2698 | if (flags & BTRFS_EXTENT_FLAG_DATA) { |
Liu Bo | 6ca1765 | 2018-03-07 12:08:09 -0700 | [diff] [blame] | 2699 | blocksize = sparity->stripe_len; |
Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 2700 | } else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) { |
Liu Bo | 6ca1765 | 2018-03-07 12:08:09 -0700 | [diff] [blame] | 2701 | blocksize = sparity->stripe_len; |
Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 2702 | } else { |
David Sterba | 25cc122 | 2017-05-16 19:10:41 +0200 | [diff] [blame] | 2703 | blocksize = sctx->fs_info->sectorsize; |
Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 2704 | WARN_ON(1); |
| 2705 | } |
| 2706 | |
| 2707 | while (len) { |
Qu Wenruo | fa485d2 | 2020-12-02 14:48:07 +0800 | [diff] [blame] | 2708 | u32 l = min(len, blocksize); |
Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 2709 | int have_csum = 0; |
| 2710 | |
| 2711 | if (flags & BTRFS_EXTENT_FLAG_DATA) { |
| 2712 | /* push csums to sbio */ |
Zhao Lei | 3b5753e | 2015-08-24 22:03:02 +0800 | [diff] [blame] | 2713 | have_csum = scrub_find_csum(sctx, logical, csum); |
Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 2714 | if (have_csum == 0) |
| 2715 | goto skip; |
| 2716 | } |
| 2717 | ret = scrub_pages_for_parity(sparity, logical, l, physical, dev, |
| 2718 | flags, gen, mirror_num, |
| 2719 | have_csum ? csum : NULL); |
Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 2720 | if (ret) |
| 2721 | return ret; |
Dan Carpenter | 6b6d24b | 2014-12-12 22:30:00 +0300 | [diff] [blame] | 2722 | skip: |
Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 2723 | len -= l; |
| 2724 | logical += l; |
| 2725 | physical += l; |
| 2726 | } |
| 2727 | return 0; |
| 2728 | } |
| 2729 | |
Wang Shilong | 3b080b2 | 2014-04-01 18:01:43 +0800 | [diff] [blame] | 2730 | /* |
| 2731 | * Given a physical address, this will calculate its |
| 2732 | * logical offset. If this is a parity stripe, it will return |
| 2733 | * the left-most data stripe's logical offset. |
| 2734 | * |
| 2735 | * Return 0 for a data stripe, 1 for a parity stripe. |
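*
* Worked example (hypothetical numbers): RAID5 over 3 devices, i.e. 2
* data stripes plus parity, with stripe_len = 64K. For a position 128K
* into the device extent, last_offset = 128K * 2 = 256K of logical data
* precedes this stripe-set. The loop below probes the two data-stripe
* slots of the set, computing each slot's rotated device index, and
* returns 0 as soon as that index equals 'num'; if neither slot lands
* on 'num', the position holds parity and 1 is returned.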
| 2736 | */ |
| 2737 | static int get_raid56_logic_offset(u64 physical, int num, |
Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 2738 | struct map_lookup *map, u64 *offset, |
| 2739 | u64 *stripe_start) |
Wang Shilong | 3b080b2 | 2014-04-01 18:01:43 +0800 | [diff] [blame] | 2740 | { |
| 2741 | int i; |
| 2742 | int j = 0; |
| 2743 | u64 stripe_nr; |
| 2744 | u64 last_offset; |
David Sterba | 9d644a6 | 2015-02-20 18:42:11 +0100 | [diff] [blame] | 2745 | u32 stripe_index; |
| 2746 | u32 rot; |
David Sterba | cff8267 | 2019-05-17 11:43:45 +0200 | [diff] [blame] | 2747 | const int data_stripes = nr_data_stripes(map); |
Wang Shilong | 3b080b2 | 2014-04-01 18:01:43 +0800 | [diff] [blame] | 2748 | |
David Sterba | cff8267 | 2019-05-17 11:43:45 +0200 | [diff] [blame] | 2749 | last_offset = (physical - map->stripes[num].physical) * data_stripes; |
Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 2750 | if (stripe_start) |
| 2751 | *stripe_start = last_offset; |
| 2752 | |
Wang Shilong | 3b080b2 | 2014-04-01 18:01:43 +0800 | [diff] [blame] | 2753 | *offset = last_offset; |
David Sterba | cff8267 | 2019-05-17 11:43:45 +0200 | [diff] [blame] | 2754 | for (i = 0; i < data_stripes; i++) { |
Wang Shilong | 3b080b2 | 2014-04-01 18:01:43 +0800 | [diff] [blame] | 2755 | *offset = last_offset + i * map->stripe_len; |
| 2756 | |
Liu Bo | 42c61ab | 2017-04-03 13:45:24 -0700 | [diff] [blame] | 2757 | stripe_nr = div64_u64(*offset, map->stripe_len); |
David Sterba | cff8267 | 2019-05-17 11:43:45 +0200 | [diff] [blame] | 2758 | stripe_nr = div_u64(stripe_nr, data_stripes); |
Wang Shilong | 3b080b2 | 2014-04-01 18:01:43 +0800 | [diff] [blame] | 2759 | |
| 2760 | /* Work out the disk rotation on this stripe-set */ |
David Sterba | 47c5713 | 2015-02-20 18:43:47 +0100 | [diff] [blame] | 2761 | stripe_nr = div_u64_rem(stripe_nr, map->num_stripes, &rot); |
Wang Shilong | 3b080b2 | 2014-04-01 18:01:43 +0800 | [diff] [blame] | 2762 | /* calculate which stripe holds this data */ |
| 2763 | rot += i; |
Wang Shilong | e4fbaee | 2014-04-11 18:32:25 +0800 | [diff] [blame] | 2764 | stripe_index = rot % map->num_stripes; |
Wang Shilong | 3b080b2 | 2014-04-01 18:01:43 +0800 | [diff] [blame] | 2765 | if (stripe_index == num) |
| 2766 | return 0; |
| 2767 | if (stripe_index < num) |
| 2768 | j++; |
| 2769 | } |
| 2770 | *offset = last_offset + j * map->stripe_len; |
| 2771 | return 1; |
| 2772 | } |
| 2773 | |
Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 2774 | static void scrub_free_parity(struct scrub_parity *sparity) |
| 2775 | { |
| 2776 | struct scrub_ctx *sctx = sparity->sctx; |
| 2777 | struct scrub_page *curr, *next; |
| 2778 | int nbits; |
| 2779 | |
| 2780 | nbits = bitmap_weight(sparity->ebitmap, sparity->nsectors); |
| 2781 | if (nbits) { |
| 2782 | spin_lock(&sctx->stat_lock); |
| 2783 | sctx->stat.read_errors += nbits; |
| 2784 | sctx->stat.uncorrectable_errors += nbits; |
| 2785 | spin_unlock(&sctx->stat_lock); |
| 2786 | } |
| 2787 | |
| 2788 | list_for_each_entry_safe(curr, next, &sparity->spages, list) { |
| 2789 | list_del_init(&curr->list); |
| 2790 | scrub_page_put(curr); |
| 2791 | } |
| 2792 | |
| 2793 | kfree(sparity); |
| 2794 | } |
| 2795 | |
Zhao Lei | 20b2e30 | 2015-06-04 20:09:15 +0800 | [diff] [blame] | 2796 | static void scrub_parity_bio_endio_worker(struct btrfs_work *work) |
| 2797 | { |
| 2798 | struct scrub_parity *sparity = container_of(work, struct scrub_parity, |
| 2799 | work); |
| 2800 | struct scrub_ctx *sctx = sparity->sctx; |
| 2801 | |
| 2802 | scrub_free_parity(sparity); |
| 2803 | scrub_pending_bio_dec(sctx); |
| 2804 | } |
| 2805 | |
Christoph Hellwig | 4246a0b | 2015-07-20 15:29:37 +0200 | [diff] [blame] | 2806 | static void scrub_parity_bio_endio(struct bio *bio) |
Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 2807 | { |
| 2808 | struct scrub_parity *sparity = (struct scrub_parity *)bio->bi_private; |
Jeff Mahoney | 0b246af | 2016-06-22 18:54:23 -0400 | [diff] [blame] | 2809 | struct btrfs_fs_info *fs_info = sparity->sctx->fs_info; |
Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 2810 | |
Christoph Hellwig | 4e4cbee | 2017-06-03 09:38:06 +0200 | [diff] [blame] | 2811 | if (bio->bi_status) |
Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 2812 | bitmap_or(sparity->ebitmap, sparity->ebitmap, sparity->dbitmap, |
| 2813 | sparity->nsectors); |
| 2814 | |
Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 2815 | bio_put(bio); |
Zhao Lei | 20b2e30 | 2015-06-04 20:09:15 +0800 | [diff] [blame] | 2816 | |
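/*
 * Bio endio callbacks may run in interrupt context, so hand the
 * final accounting and freeing off to the scrub_parity_workers
 * queue instead of doing it here.
 */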
Omar Sandoval | a0cac0e | 2019-09-16 11:30:57 -0700 | [diff] [blame] | 2817 | btrfs_init_work(&sparity->work, scrub_parity_bio_endio_worker, NULL, |
| 2818 | NULL); |
Jeff Mahoney | 0b246af | 2016-06-22 18:54:23 -0400 | [diff] [blame] | 2819 | btrfs_queue_work(fs_info->scrub_parity_workers, &sparity->work); |
Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 2820 | } |
| 2821 | |
| 2822 | static void scrub_parity_check_and_repair(struct scrub_parity *sparity) |
| 2823 | { |
| 2824 | struct scrub_ctx *sctx = sparity->sctx; |
Jeff Mahoney | 0b246af | 2016-06-22 18:54:23 -0400 | [diff] [blame] | 2825 | struct btrfs_fs_info *fs_info = sctx->fs_info; |
Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 2826 | struct bio *bio; |
| 2827 | struct btrfs_raid_bio *rbio; |
Qu Wenruo | 4c66461 | 2021-09-15 15:17:16 +0800 | [diff] [blame] | 2828 | struct btrfs_io_context *bioc = NULL; |
Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 2829 | u64 length; |
| 2830 | int ret; |
| 2831 | |
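/*
 * Drop already-errored sectors from dbitmap; if no intact data
 * sectors remain there is nothing for the parity check to verify,
 * so skip straight to cleanup.
 */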
| 2832 | if (!bitmap_andnot(sparity->dbitmap, sparity->dbitmap, sparity->ebitmap, |
| 2833 | sparity->nsectors)) |
| 2834 | goto out; |
| 2835 | |
Zhao Lei | a0dd59d | 2015-07-21 15:42:26 +0800 | [diff] [blame] | 2836 | length = sparity->logic_end - sparity->logic_start; |
Qu Wenruo | ae6529c | 2017-03-29 09:33:21 +0800 | [diff] [blame] | 2837 | |
| 2838 | btrfs_bio_counter_inc_blocked(fs_info); |
Jeff Mahoney | 0b246af | 2016-06-22 18:54:23 -0400 | [diff] [blame] | 2839 | ret = btrfs_map_sblock(fs_info, BTRFS_MAP_WRITE, sparity->logic_start, |
Qu Wenruo | 4c66461 | 2021-09-15 15:17:16 +0800 | [diff] [blame] | 2840 | &length, &bioc); |
| 2841 | if (ret || !bioc || !bioc->raid_map) |
| 2842 | goto bioc_out; |
Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 2843 | |
Qu Wenruo | c3a3b19 | 2021-09-15 15:17:18 +0800 | [diff] [blame] | 2844 | bio = btrfs_bio_alloc(BIO_MAX_VECS); |
Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 2845 | bio->bi_iter.bi_sector = sparity->logic_start >> 9; |
| 2846 | bio->bi_private = sparity; |
| 2847 | bio->bi_end_io = scrub_parity_bio_endio; |
| 2848 | |
Qu Wenruo | 6a258d7 | 2021-09-23 14:00:09 +0800 | [diff] [blame] | 2849 | rbio = raid56_parity_alloc_scrub_rbio(bio, bioc, length, |
| 2850 | sparity->scrub_dev, |
Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 2851 | sparity->dbitmap, |
| 2852 | sparity->nsectors); |
| 2853 | if (!rbio) |
| 2854 | goto rbio_out; |
| 2855 | |
Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 2856 | scrub_pending_bio_inc(sctx); |
| 2857 | raid56_parity_submit_scrub_rbio(rbio); |
| 2858 | return; |
| 2859 | |
| 2860 | rbio_out: |
| 2861 | bio_put(bio); |
Qu Wenruo | 4c66461 | 2021-09-15 15:17:16 +0800 | [diff] [blame] | 2862 | bioc_out: |
Qu Wenruo | ae6529c | 2017-03-29 09:33:21 +0800 | [diff] [blame] | 2863 | btrfs_bio_counter_dec(fs_info); |
Qu Wenruo | 4c66461 | 2021-09-15 15:17:16 +0800 | [diff] [blame] | 2864 | btrfs_put_bioc(bioc); |
Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 2865 | bitmap_or(sparity->ebitmap, sparity->ebitmap, sparity->dbitmap, |
| 2866 | sparity->nsectors); |
| 2867 | spin_lock(&sctx->stat_lock); |
| 2868 | sctx->stat.malloc_errors++; |
| 2869 | spin_unlock(&sctx->stat_lock); |
| 2870 | out: |
| 2871 | scrub_free_parity(sparity); |
| 2872 | } |
| 2873 | |
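/*
 * Round the per-stripe sector count up to whole longs so that the
 * dbitmap/ebitmap allocations can use the regular bitmap helpers.
 * E.g. (hypothetical values): nsectors = 16 with BITS_PER_LONG = 64
 * rounds up to one long, i.e. 8 bytes per bitmap.
 */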
| 2874 | static inline int scrub_calc_parity_bitmap_len(int nsectors) |
| 2875 | { |
Zhao Lei | bfca9a6 | 2014-12-08 19:55:57 +0800 | [diff] [blame] | 2876 | return DIV_ROUND_UP(nsectors, BITS_PER_LONG) * sizeof(long); |
Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 2877 | } |
| 2878 | |
| 2879 | static void scrub_parity_get(struct scrub_parity *sparity) |
| 2880 | { |
Elena Reshetova | 78a7645 | 2017-03-03 10:55:24 +0200 | [diff] [blame] | 2881 | refcount_inc(&sparity->refs); |
Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 2882 | } |
| 2883 | |
| 2884 | static void scrub_parity_put(struct scrub_parity *sparity) |
| 2885 | { |
Elena Reshetova | 78a7645 | 2017-03-03 10:55:24 +0200 | [diff] [blame] | 2886 | if (!refcount_dec_and_test(&sparity->refs)) |
Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 2887 | return; |
| 2888 | |
| 2889 | scrub_parity_check_and_repair(sparity); |
| 2890 | } |
| 2891 | |
| 2892 | static noinline_for_stack int scrub_raid56_parity(struct scrub_ctx *sctx, |
| 2893 | struct map_lookup *map, |
| 2894 | struct btrfs_device *sdev, |
| 2895 | struct btrfs_path *path, |
| 2896 | u64 logic_start, |
| 2897 | u64 logic_end) |
| 2898 | { |
Jeff Mahoney | fb45625 | 2016-06-22 18:54:56 -0400 | [diff] [blame] | 2899 | struct btrfs_fs_info *fs_info = sctx->fs_info; |
Josef Bacik | 29cbcf4 | 2021-11-05 16:45:45 -0400 | [diff] [blame^] | 2900 | struct btrfs_root *root = btrfs_extent_root(fs_info, logic_start); |
Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 2901 | struct btrfs_root *csum_root = fs_info->csum_root; |
| 2902 | struct btrfs_extent_item *extent; |
Qu Wenruo | 4c66461 | 2021-09-15 15:17:16 +0800 | [diff] [blame] | 2903 | struct btrfs_io_context *bioc = NULL; |
Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 2904 | u64 flags; |
| 2905 | int ret; |
| 2906 | int slot; |
| 2907 | struct extent_buffer *l; |
| 2908 | struct btrfs_key key; |
| 2909 | u64 generation; |
| 2910 | u64 extent_logical; |
| 2911 | u64 extent_physical; |
Qu Wenruo | fa485d2 | 2020-12-02 14:48:07 +0800 | [diff] [blame] | 2912 | /* Check the comment in scrub_stripe() for why u32 is enough here */ |
| 2913 | u32 extent_len; |
Omar Sandoval | 4a77089 | 2015-06-19 11:52:52 -0700 | [diff] [blame] | 2914 | u64 mapped_length; |
Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 2915 | struct btrfs_device *extent_dev; |
| 2916 | struct scrub_parity *sparity; |
| 2917 | int nsectors; |
| 2918 | int bitmap_len; |
| 2919 | int extent_mirror_num; |
| 2920 | int stop_loop = 0; |
| 2921 | |
Qu Wenruo | fa485d2 | 2020-12-02 14:48:07 +0800 | [diff] [blame] | 2922 | ASSERT(map->stripe_len <= U32_MAX); |
David Sterba | ab108d9 | 2020-07-01 20:45:04 +0200 | [diff] [blame] | 2923 | nsectors = map->stripe_len >> fs_info->sectorsize_bits; |
Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 2924 | bitmap_len = scrub_calc_parity_bitmap_len(nsectors); |
| 2925 | sparity = kzalloc(sizeof(struct scrub_parity) + 2 * bitmap_len, |
| 2926 | GFP_NOFS); |
| 2927 | if (!sparity) { |
| 2928 | spin_lock(&sctx->stat_lock); |
| 2929 | sctx->stat.malloc_errors++; |
| 2930 | spin_unlock(&sctx->stat_lock); |
| 2931 | return -ENOMEM; |
| 2932 | } |
| 2933 | |
Qu Wenruo | fa485d2 | 2020-12-02 14:48:07 +0800 | [diff] [blame] | 2934 | ASSERT(map->stripe_len <= U32_MAX); |
Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 2935 | sparity->stripe_len = map->stripe_len; |
| 2936 | sparity->nsectors = nsectors; |
| 2937 | sparity->sctx = sctx; |
| 2938 | sparity->scrub_dev = sdev; |
| 2939 | sparity->logic_start = logic_start; |
| 2940 | sparity->logic_end = logic_end; |
Elena Reshetova | 78a7645 | 2017-03-03 10:55:24 +0200 | [diff] [blame] | 2941 | refcount_set(&sparity->refs, 1); |
Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 2942 | INIT_LIST_HEAD(&sparity->spages); |
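/*
 * Both bitmaps live in the single allocation above: dbitmap (sectors
 * carrying data) takes the first bitmap_len bytes, ebitmap (sectors
 * that saw errors) the second.
 */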
| 2943 | sparity->dbitmap = sparity->bitmap; |
| 2944 | sparity->ebitmap = (void *)sparity->bitmap + bitmap_len; |
| 2945 | |
| 2946 | ret = 0; |
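/*
 * Walk the extent tree one stripe at a time: position the search at
 * the extent item at or before logic_start, then scrub every extent
 * that intersects the current stripe.
 */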
| 2947 | while (logic_start < logic_end) { |
| 2948 | if (btrfs_fs_incompat(fs_info, SKINNY_METADATA)) |
| 2949 | key.type = BTRFS_METADATA_ITEM_KEY; |
| 2950 | else |
| 2951 | key.type = BTRFS_EXTENT_ITEM_KEY; |
| 2952 | key.objectid = logic_start; |
| 2953 | key.offset = (u64)-1; |
| 2954 | |
| 2955 | ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); |
| 2956 | if (ret < 0) |
| 2957 | goto out; |
| 2958 | |
| 2959 | if (ret > 0) { |
| 2960 | ret = btrfs_previous_extent_item(root, path, 0); |
| 2961 | if (ret < 0) |
| 2962 | goto out; |
| 2963 | if (ret > 0) { |
| 2964 | btrfs_release_path(path); |
| 2965 | ret = btrfs_search_slot(NULL, root, &key, |
| 2966 | path, 0, 0); |
| 2967 | if (ret < 0) |
| 2968 | goto out; |
| 2969 | } |
| 2970 | } |
| 2971 | |
| 2972 | stop_loop = 0; |
| 2973 | while (1) { |
| 2974 | u64 bytes; |
| 2975 | |
| 2976 | l = path->nodes[0]; |
| 2977 | slot = path->slots[0]; |
| 2978 | if (slot >= btrfs_header_nritems(l)) { |
| 2979 | ret = btrfs_next_leaf(root, path); |
| 2980 | if (ret == 0) |
| 2981 | continue; |
| 2982 | if (ret < 0) |
| 2983 | goto out; |
| 2984 | |
| 2985 | stop_loop = 1; |
| 2986 | break; |
| 2987 | } |
| 2988 | btrfs_item_key_to_cpu(l, &key, slot); |
| 2989 | |
Zhao Lei | d7cad23 | 2015-07-22 13:14:48 +0800 | [diff] [blame] | 2990 | if (key.type != BTRFS_EXTENT_ITEM_KEY && |
| 2991 | key.type != BTRFS_METADATA_ITEM_KEY) |
| 2992 | goto next; |
| 2993 | |
Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 2994 | if (key.type == BTRFS_METADATA_ITEM_KEY) |
Jeff Mahoney | 0b246af | 2016-06-22 18:54:23 -0400 | [diff] [blame] | 2995 | bytes = fs_info->nodesize; |
Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 2996 | else |
| 2997 | bytes = key.offset; |
| 2998 | |
| 2999 | if (key.objectid + bytes <= logic_start) |
| 3000 | goto next; |
| 3001 | |
Zhao Lei | a0dd59d | 2015-07-21 15:42:26 +0800 | [diff] [blame] | 3002 | if (key.objectid >= logic_end) { |
Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 3003 | stop_loop = 1; |
| 3004 | break; |
| 3005 | } |
| 3006 | |
| 3007 | while (key.objectid >= logic_start + map->stripe_len) |
| 3008 | logic_start += map->stripe_len; |
| 3009 | |
| 3010 | extent = btrfs_item_ptr(l, slot, |
| 3011 | struct btrfs_extent_item); |
| 3012 | flags = btrfs_extent_flags(l, extent); |
| 3013 | generation = btrfs_extent_generation(l, extent); |
| 3014 | |
Zhao Lei | a323e81 | 2015-07-23 12:29:49 +0800 | [diff] [blame] | 3015 | if ((flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) && |
| 3016 | (key.objectid < logic_start || |
| 3017 | key.objectid + bytes > |
| 3018 | logic_start + map->stripe_len)) { |
Jeff Mahoney | 5d163e0 | 2016-09-20 10:05:00 -0400 | [diff] [blame] | 3019 | btrfs_err(fs_info, |
| 3020 | "scrub: tree block %llu spanning stripes, ignored. logical=%llu", |
Zhao Lei | a323e81 | 2015-07-23 12:29:49 +0800 | [diff] [blame] | 3021 | key.objectid, logic_start); |
Zhao Lei | 9799d2c3 | 2015-08-25 21:31:40 +0800 | [diff] [blame] | 3022 | spin_lock(&sctx->stat_lock); |
| 3023 | sctx->stat.uncorrectable_errors++; |
| 3024 | spin_unlock(&sctx->stat_lock); |
Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 3025 | goto next; |
| 3026 | } |
| 3027 | again: |
| 3028 | extent_logical = key.objectid; |
Qu Wenruo | fa485d2 | 2020-12-02 14:48:07 +0800 | [diff] [blame] | 3029 | ASSERT(bytes <= U32_MAX); |
Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 3030 | extent_len = bytes; |
| 3031 | |
| 3032 | if (extent_logical < logic_start) { |
| 3033 | extent_len -= logic_start - extent_logical; |
| 3034 | extent_logical = logic_start; |
| 3035 | } |
| 3036 | |
| 3037 | if (extent_logical + extent_len > |
| 3038 | logic_start + map->stripe_len) |
| 3039 | extent_len = logic_start + map->stripe_len - |
| 3040 | extent_logical; |
| 3041 | |
| 3042 | scrub_parity_mark_sectors_data(sparity, extent_logical, |
| 3043 | extent_len); |
| 3044 | |
Omar Sandoval | 4a77089 | 2015-06-19 11:52:52 -0700 | [diff] [blame] | 3045 | mapped_length = extent_len; |
Qu Wenruo | 4c66461 | 2021-09-15 15:17:16 +0800 | [diff] [blame] | 3046 | bioc = NULL; |
Christoph Hellwig | cf8cddd | 2016-10-27 09:27:36 +0200 | [diff] [blame] | 3047 | ret = btrfs_map_block(fs_info, BTRFS_MAP_READ, |
Qu Wenruo | 4c66461 | 2021-09-15 15:17:16 +0800 | [diff] [blame] | 3048 | extent_logical, &mapped_length, &bioc, |
Christoph Hellwig | cf8cddd | 2016-10-27 09:27:36 +0200 | [diff] [blame] | 3049 | 0); |
Omar Sandoval | 4a77089 | 2015-06-19 11:52:52 -0700 | [diff] [blame] | 3050 | if (!ret) { |
Qu Wenruo | 4c66461 | 2021-09-15 15:17:16 +0800 | [diff] [blame] | 3051 | if (!bioc || mapped_length < extent_len) |
Omar Sandoval | 4a77089 | 2015-06-19 11:52:52 -0700 | [diff] [blame] | 3052 | ret = -EIO; |
| 3053 | } |
| 3054 | if (ret) { |
Qu Wenruo | 4c66461 | 2021-09-15 15:17:16 +0800 | [diff] [blame] | 3055 | btrfs_put_bioc(bioc); |
Omar Sandoval | 4a77089 | 2015-06-19 11:52:52 -0700 | [diff] [blame] | 3056 | goto out; |
| 3057 | } |
Qu Wenruo | 4c66461 | 2021-09-15 15:17:16 +0800 | [diff] [blame] | 3058 | extent_physical = bioc->stripes[0].physical; |
| 3059 | extent_mirror_num = bioc->mirror_num; |
| 3060 | extent_dev = bioc->stripes[0].dev; |
| 3061 | btrfs_put_bioc(bioc); |
Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 3062 | |
| 3063 | ret = btrfs_lookup_csums_range(csum_root, |
| 3064 | extent_logical, |
| 3065 | extent_logical + extent_len - 1, |
| 3066 | &sctx->csum_list, 1); |
| 3067 | if (ret) |
| 3068 | goto out; |
| 3069 | |
| 3070 | ret = scrub_extent_for_parity(sparity, extent_logical, |
| 3071 | extent_len, |
| 3072 | extent_physical, |
| 3073 | extent_dev, flags, |
| 3074 | generation, |
| 3075 | extent_mirror_num); |
Zhao Lei | 6fa96d7 | 2015-07-21 12:22:30 +0800 | [diff] [blame] | 3076 | |
| 3077 | scrub_free_csums(sctx); |
| 3078 | |
Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 3079 | if (ret) |
| 3080 | goto out; |
| 3081 | |
Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 3082 | if (extent_logical + extent_len < |
| 3083 | key.objectid + bytes) { |
| 3084 | logic_start += map->stripe_len; |
| 3085 | |
| 3086 | if (logic_start >= logic_end) { |
| 3087 | stop_loop = 1; |
| 3088 | break; |
| 3089 | } |
| 3090 | |
| 3091 | if (logic_start < key.objectid + bytes) { |
| 3092 | cond_resched(); |
| 3093 | goto again; |
| 3094 | } |
| 3095 | } |
| 3096 | next: |
| 3097 | path->slots[0]++; |
| 3098 | } |
| 3099 | |
| 3100 | btrfs_release_path(path); |
| 3101 | |
| 3102 | if (stop_loop) |
| 3103 | break; |
| 3104 | |
| 3105 | logic_start += map->stripe_len; |
| 3106 | } |
| 3107 | out: |
Qu Wenruo | fa485d2 | 2020-12-02 14:48:07 +0800 | [diff] [blame] | 3108 | if (ret < 0) { |
| 3109 | ASSERT(logic_end - logic_start <= U32_MAX); |
Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 3110 | scrub_parity_mark_sectors_error(sparity, logic_start, |
Zhao Lei | a0dd59d | 2015-07-21 15:42:26 +0800 | [diff] [blame] | 3111 | logic_end - logic_start); |
Qu Wenruo | fa485d2 | 2020-12-02 14:48:07 +0800 | [diff] [blame] | 3112 | } |
Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 3113 | scrub_parity_put(sparity); |
| 3114 | scrub_submit(sctx); |
David Sterba | 3fb9930 | 2017-05-16 19:10:32 +0200 | [diff] [blame] | 3115 | mutex_lock(&sctx->wr_lock); |
Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 3116 | scrub_wr_submit(sctx); |
David Sterba | 3fb9930 | 2017-05-16 19:10:32 +0200 | [diff] [blame] | 3117 | mutex_unlock(&sctx->wr_lock); |
Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 3118 | |
| 3119 | btrfs_release_path(path); |
| 3120 | return ret < 0 ? ret : 0; |
| 3121 | } |
| 3122 | |
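/*
 * On zoned filesystems the dev-replace target must be written
 * sequentially, so flush all queued writes and wait for the
 * in-flight bios before moving on.
 */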
Naohiro Aota | de17add | 2021-02-04 19:22:13 +0900 | [diff] [blame] | 3123 | static void sync_replace_for_zoned(struct scrub_ctx *sctx) |
| 3124 | { |
| 3125 | if (!btrfs_is_zoned(sctx->fs_info)) |
| 3126 | return; |
| 3127 | |
| 3128 | sctx->flush_all_writes = true; |
| 3129 | scrub_submit(sctx); |
| 3130 | mutex_lock(&sctx->wr_lock); |
| 3131 | scrub_wr_submit(sctx); |
| 3132 | mutex_unlock(&sctx->wr_lock); |
| 3133 | |
| 3134 | wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0); |
| 3135 | } |
| 3136 | |
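/*
 * After replaying a device extent to a zoned target, the zone's
 * hardware write pointer may sit behind the logically written data;
 * recover it so that later sequential writes do not fail.
 */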
Naohiro Aota | 7db1c5d | 2021-02-04 19:22:14 +0900 | [diff] [blame] | 3137 | static int sync_write_pointer_for_zoned(struct scrub_ctx *sctx, u64 logical, |
| 3138 | u64 physical, u64 physical_end) |
| 3139 | { |
| 3140 | struct btrfs_fs_info *fs_info = sctx->fs_info; |
| 3141 | int ret = 0; |
| 3142 | |
| 3143 | if (!btrfs_is_zoned(fs_info)) |
| 3144 | return 0; |
| 3145 | |
| 3146 | wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0); |
| 3147 | |
| 3148 | mutex_lock(&sctx->wr_lock); |
| 3149 | if (sctx->write_pointer < physical_end) { |
| 3150 | ret = btrfs_sync_zone_write_pointer(sctx->wr_tgtdev, logical, |
| 3151 | physical, |
| 3152 | sctx->write_pointer); |
| 3153 | if (ret) |
| 3154 | btrfs_err(fs_info, |
| 3155 | "zoned: failed to recover write pointer"); |
| 3156 | } |
| 3157 | mutex_unlock(&sctx->wr_lock); |
| 3158 | btrfs_dev_clear_zone_empty(sctx->wr_tgtdev, physical); |
| 3159 | |
| 3160 | return ret; |
| 3161 | } |
| 3162 | |
Stefan Behrens | d9d181c | 2012-11-02 09:58:09 +0100 | [diff] [blame] | 3163 | static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx, |
Stefan Behrens | a36cf8b | 2012-11-02 13:26:57 +0100 | [diff] [blame] | 3164 | struct map_lookup *map, |
| 3165 | struct btrfs_device *scrub_dev, |
Filipe Manana | 2473d24 | 2020-05-08 11:01:10 +0100 | [diff] [blame] | 3166 | int num, u64 base, u64 length, |
| 3167 | struct btrfs_block_group *cache) |
Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 3168 | { |
Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 3169 | struct btrfs_path *path, *ppath; |
Jeff Mahoney | fb45625 | 2016-06-22 18:54:56 -0400 | [diff] [blame] | 3170 | struct btrfs_fs_info *fs_info = sctx->fs_info; |
Josef Bacik | 29cbcf4 | 2021-11-05 16:45:45 -0400 | [diff] [blame^] | 3171 | struct btrfs_root *root; |
Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 3172 | struct btrfs_root *csum_root = fs_info->csum_root; |
| 3173 | struct btrfs_extent_item *extent; |
Arne Jansen | e7786c3 | 2011-05-28 20:58:38 +0000 | [diff] [blame] | 3174 | struct blk_plug plug; |
Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 3175 | u64 flags; |
| 3176 | int ret; |
| 3177 | int slot; |
Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 3178 | u64 nstripes; |
Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 3179 | struct extent_buffer *l; |
Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 3180 | u64 physical; |
| 3181 | u64 logical; |
Liu Bo | 625f1c8d | 2013-04-27 02:56:57 +0000 | [diff] [blame] | 3182 | u64 logic_end; |
Wang Shilong | 3b080b2 | 2014-04-01 18:01:43 +0800 | [diff] [blame] | 3183 | u64 physical_end; |
Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 3184 | u64 generation; |
Jan Schmidt | e12fa9c | 2011-06-17 15:55:21 +0200 | [diff] [blame] | 3185 | int mirror_num; |
Arne Jansen | 7a26285 | 2011-06-10 12:39:23 +0200 | [diff] [blame] | 3186 | struct reada_control *reada1; |
| 3187 | struct reada_control *reada2; |
David Sterba | e6c11f9 | 2016-03-24 18:00:53 +0100 | [diff] [blame] | 3188 | struct btrfs_key key; |
Arne Jansen | 7a26285 | 2011-06-10 12:39:23 +0200 | [diff] [blame] | 3189 | struct btrfs_key key_end; |
Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 3190 | u64 increment = map->stripe_len; |
| 3191 | u64 offset; |
Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 3192 | u64 extent_logical; |
| 3193 | u64 extent_physical; |
Qu Wenruo | fa485d2 | 2020-12-02 14:48:07 +0800 | [diff] [blame] | 3194 | /* |
| 3195 | * Unlike chunk length, extent length should never go beyond |
| 3196 | * BTRFS_MAX_EXTENT_SIZE, thus u32 is enough here. |
| 3197 | */ |
| 3198 | u32 extent_len; |
Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 3199 | u64 stripe_logical; |
| 3200 | u64 stripe_end; |
Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 3201 | struct btrfs_device *extent_dev; |
| 3202 | int extent_mirror_num; |
Wang Shilong | 3b080b2 | 2014-04-01 18:01:43 +0800 | [diff] [blame] | 3203 | int stop_loop = 0; |
David Woodhouse | 53b381b | 2013-01-29 18:40:14 -0500 | [diff] [blame] | 3204 | |
Wang Shilong | 3b080b2 | 2014-04-01 18:01:43 +0800 | [diff] [blame] | 3205 | physical = map->stripes[num].physical; |
Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 3206 | offset = 0; |
Liu Bo | 42c61ab | 2017-04-03 13:45:24 -0700 | [diff] [blame] | 3207 | nstripes = div64_u64(length, map->stripe_len); |
David Sterba | 7735cd7 | 2019-11-28 15:37:46 +0100 | [diff] [blame] | 3208 | mirror_num = 1; |
| 3209 | increment = map->stripe_len; |
Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 3210 | if (map->type & BTRFS_BLOCK_GROUP_RAID0) { |
| 3211 | offset = map->stripe_len * num; |
| 3212 | increment = map->stripe_len * map->num_stripes; |
Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 3213 | } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) { |
| 3214 | int factor = map->num_stripes / map->sub_stripes; |
| 3215 | offset = map->stripe_len * (num / map->sub_stripes); |
| 3216 | increment = map->stripe_len * factor; |
Jan Schmidt | 193ea74 | 2011-06-13 19:56:54 +0200 | [diff] [blame] | 3217 | mirror_num = num % map->sub_stripes + 1; |
David Sterba | c7369b3 | 2019-05-31 15:39:31 +0200 | [diff] [blame] | 3218 | } else if (map->type & BTRFS_BLOCK_GROUP_RAID1_MASK) { |
Jan Schmidt | 193ea74 | 2011-06-13 19:56:54 +0200 | [diff] [blame] | 3219 | mirror_num = num % map->num_stripes + 1; |
Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 3220 | } else if (map->type & BTRFS_BLOCK_GROUP_DUP) { |
Jan Schmidt | 193ea74 | 2011-06-13 19:56:54 +0200 | [diff] [blame] | 3221 | mirror_num = num % map->num_stripes + 1; |
Zhao Lei | ffe2d20 | 2015-01-20 15:11:44 +0800 | [diff] [blame] | 3222 | } else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) { |
Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 3223 | get_raid56_logic_offset(physical, num, map, &offset, NULL); |
Wang Shilong | 3b080b2 | 2014-04-01 18:01:43 +0800 | [diff] [blame] | 3224 | increment = map->stripe_len * nr_data_stripes(map); |
Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 3225 | } |
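/*
 * Illustration of the mapping above (hypothetical values): RAID10 with
 * num_stripes = 4 and sub_stripes = 2. Stripe num = 3 is the second
 * copy of the second stripe pair, so offset = stripe_len * (3 / 2) =
 * stripe_len, increment = stripe_len * (4 / 2) and
 * mirror_num = 3 % 2 + 1 = 2.
 */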
| 3226 | |
| 3227 | path = btrfs_alloc_path(); |
| 3228 | if (!path) |
| 3229 | return -ENOMEM; |
| 3230 | |
Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 3231 | ppath = btrfs_alloc_path(); |
| 3232 | if (!ppath) { |
Tsutomu Itoh | 379d685 | 2015-01-09 17:37:52 +0900 | [diff] [blame] | 3233 | btrfs_free_path(path); |
Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 3234 | return -ENOMEM; |
| 3235 | } |
| 3236 | |
Stefan Behrens | b5d67f6 | 2012-03-27 14:21:27 -0400 | [diff] [blame] | 3237 | /* |
| 3238 | * Work on the commit root. The related disk blocks are static as |
| 3239 | * long as COW is applied. This means it is safe to rewrite |
| 3240 | * them to repair disk errors without any race conditions. |
| 3241 | */ |
Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 3242 | path->search_commit_root = 1; |
| 3243 | path->skip_locking = 1; |
| 3244 | |
Gui Hecheng | 063c54d | 2015-01-09 09:39:40 +0800 | [diff] [blame] | 3245 | ppath->search_commit_root = 1; |
| 3246 | ppath->skip_locking = 1; |
Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 3247 | /* |
Arne Jansen | 7a26285 | 2011-06-10 12:39:23 +0200 | [diff] [blame] | 3248 | * trigger the readahead for extent tree csum tree and wait for |
| 3249 | * completion. During readahead, the scrub is officially paused |
| 3250 | * to not hold off transaction commits |
Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 3251 | */ |
| 3252 | logical = base + offset; |
Wang Shilong | 3b080b2 | 2014-04-01 18:01:43 +0800 | [diff] [blame] | 3253 | physical_end = physical + nstripes * map->stripe_len; |
Zhao Lei | ffe2d20 | 2015-01-20 15:11:44 +0800 | [diff] [blame] | 3254 | if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) { |
Wang Shilong | 3b080b2 | 2014-04-01 18:01:43 +0800 | [diff] [blame] | 3255 | get_raid56_logic_offset(physical_end, num, |
Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 3256 | map, &logic_end, NULL); |
Wang Shilong | 3b080b2 | 2014-04-01 18:01:43 +0800 | [diff] [blame] | 3257 | logic_end += base; |
| 3258 | } else { |
| 3259 | logic_end = logical + increment * nstripes; |
| 3260 | } |
Stefan Behrens | d9d181c | 2012-11-02 09:58:09 +0100 | [diff] [blame] | 3261 | wait_event(sctx->list_wait, |
Stefan Behrens | b6bfebc | 2012-11-02 16:44:58 +0100 | [diff] [blame] | 3262 | atomic_read(&sctx->bios_in_flight) == 0); |
Wang Shilong | cb7ab02 | 2013-12-04 21:16:53 +0800 | [diff] [blame] | 3263 | scrub_blocked_if_needed(fs_info); |
Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 3264 | |
Josef Bacik | 29cbcf4 | 2021-11-05 16:45:45 -0400 | [diff] [blame^] | 3265 | root = btrfs_extent_root(fs_info, logical); |
| 3266 | |
Arne Jansen | 7a26285 | 2011-06-10 12:39:23 +0200 | [diff] [blame] | 3267 | /* FIXME it might be better to start readahead at commit root */ |
David Sterba | e6c11f9 | 2016-03-24 18:00:53 +0100 | [diff] [blame] | 3268 | key.objectid = logical; |
| 3269 | key.type = BTRFS_EXTENT_ITEM_KEY; |
| 3270 | key.offset = (u64)0; |
Wang Shilong | 3b080b2 | 2014-04-01 18:01:43 +0800 | [diff] [blame] | 3271 | key_end.objectid = logic_end; |
Josef Bacik | 3173a18 | 2013-03-07 14:22:04 -0500 | [diff] [blame] | 3272 | key_end.type = BTRFS_METADATA_ITEM_KEY; |
| 3273 | key_end.offset = (u64)-1; |
David Sterba | e6c11f9 | 2016-03-24 18:00:53 +0100 | [diff] [blame] | 3274 | reada1 = btrfs_reada_add(root, &key, &key_end); |
Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 3275 | |
Filipe Manana | a6889ca | 2020-10-12 11:55:26 +0100 | [diff] [blame] | 3276 | if (cache->flags & BTRFS_BLOCK_GROUP_DATA) { |
| 3277 | key.objectid = BTRFS_EXTENT_CSUM_OBJECTID; |
| 3278 | key.type = BTRFS_EXTENT_CSUM_KEY; |
| 3279 | key.offset = logical; |
| 3280 | key_end.objectid = BTRFS_EXTENT_CSUM_OBJECTID; |
| 3281 | key_end.type = BTRFS_EXTENT_CSUM_KEY; |
| 3282 | key_end.offset = logic_end; |
| 3283 | reada2 = btrfs_reada_add(csum_root, &key, &key_end); |
| 3284 | } else { |
| 3285 | reada2 = NULL; |
| 3286 | } |
Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 3287 | |
Arne Jansen | 7a26285 | 2011-06-10 12:39:23 +0200 | [diff] [blame] | 3288 | if (!IS_ERR(reada1)) |
| 3289 | btrfs_reada_wait(reada1); |
Filipe Manana | a6889ca | 2020-10-12 11:55:26 +0100 | [diff] [blame] | 3290 | if (!IS_ERR_OR_NULL(reada2)) |
Arne Jansen | 7a26285 | 2011-06-10 12:39:23 +0200 | [diff] [blame] | 3291 | btrfs_reada_wait(reada2); |
Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 3292 | |
Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 3293 | |
| 3294 | /* |
| 3295 | * Collect all data csums for the stripe to avoid seeking during |
| 3296 | * the scrub. With crc32 this might currently end up being about 1MB. |
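*
* Back-of-the-envelope check (assuming crc32 and a 4K sector size):
* 1GiB of data is 262144 sectors; at 4 csum bytes each that is 1MiB
* of checksums, matching the estimate above.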
| 3297 | */ |
Arne Jansen | e7786c3 | 2011-05-28 20:58:38 +0000 | [diff] [blame] | 3298 | blk_start_plug(&plug); |
Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 3299 | |
Naohiro Aota | de17add | 2021-02-04 19:22:13 +0900 | [diff] [blame] | 3300 | if (sctx->is_dev_replace && |
| 3301 | btrfs_dev_is_sequential(sctx->wr_tgtdev, physical)) { |
| 3302 | mutex_lock(&sctx->wr_lock); |
| 3303 | sctx->write_pointer = physical; |
| 3304 | mutex_unlock(&sctx->wr_lock); |
| 3305 | sctx->flush_all_writes = true; |
| 3306 | } |
| 3307 | |
Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 3308 | /* |
| 3309 | * now find all extents for each stripe and scrub them |
| 3310 | */ |
Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 3311 | ret = 0; |
Wang Shilong | 3b080b2 | 2014-04-01 18:01:43 +0800 | [diff] [blame] | 3312 | while (physical < physical_end) { |
Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 3313 | /* |
| 3314 | * canceled? |
| 3315 | */ |
| 3316 | if (atomic_read(&fs_info->scrub_cancel_req) || |
Stefan Behrens | d9d181c | 2012-11-02 09:58:09 +0100 | [diff] [blame] | 3317 | atomic_read(&sctx->cancel_req)) { |
Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 3318 | ret = -ECANCELED; |
| 3319 | goto out; |
| 3320 | } |
| 3321 | /* |
| 3322 | * check to see if we have to pause |
| 3323 | */ |
| 3324 | if (atomic_read(&fs_info->scrub_pause_req)) { |
| 3325 | /* push queued extents */ |
David Sterba | 2073c4c | 2017-03-31 17:12:51 +0200 | [diff] [blame] | 3326 | sctx->flush_all_writes = true; |
Stefan Behrens | d9d181c | 2012-11-02 09:58:09 +0100 | [diff] [blame] | 3327 | scrub_submit(sctx); |
David Sterba | 3fb9930 | 2017-05-16 19:10:32 +0200 | [diff] [blame] | 3328 | mutex_lock(&sctx->wr_lock); |
Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 3329 | scrub_wr_submit(sctx); |
David Sterba | 3fb9930 | 2017-05-16 19:10:32 +0200 | [diff] [blame] | 3330 | mutex_unlock(&sctx->wr_lock); |
Stefan Behrens | d9d181c | 2012-11-02 09:58:09 +0100 | [diff] [blame] | 3331 | wait_event(sctx->list_wait, |
Stefan Behrens | b6bfebc | 2012-11-02 16:44:58 +0100 | [diff] [blame] | 3332 | atomic_read(&sctx->bios_in_flight) == 0); |
David Sterba | 2073c4c | 2017-03-31 17:12:51 +0200 | [diff] [blame] | 3333 | sctx->flush_all_writes = false; |
Wang Shilong | 3cb0929 | 2013-12-04 21:15:19 +0800 | [diff] [blame] | 3334 | scrub_blocked_if_needed(fs_info); |
Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 3335 | } |
| 3336 | |
Zhao Lei | f2f66a2 | 2015-07-21 12:22:29 +0800 | [diff] [blame] | 3337 | if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) { |
| 3338 | ret = get_raid56_logic_offset(physical, num, map, |
| 3339 | &logical, |
| 3340 | &stripe_logical); |
| 3341 | logical += base; |
| 3342 | if (ret) { |
Zhao Lei | 7955323 | 2015-08-18 17:54:30 +0800 | [diff] [blame] | 3343 | /* it is a parity stripe */ |
Zhao Lei | f2f66a2 | 2015-07-21 12:22:29 +0800 | [diff] [blame] | 3344 | stripe_logical += base; |
Zhao Lei | a0dd59d | 2015-07-21 15:42:26 +0800 | [diff] [blame] | 3345 | stripe_end = stripe_logical + increment; |
Zhao Lei | f2f66a2 | 2015-07-21 12:22:29 +0800 | [diff] [blame] | 3346 | ret = scrub_raid56_parity(sctx, map, scrub_dev, |
| 3347 | ppath, stripe_logical, |
| 3348 | stripe_end); |
| 3349 | if (ret) |
| 3350 | goto out; |
| 3351 | goto skip; |
| 3352 | } |
| 3353 | } |
| 3354 | |
Wang Shilong | 7c76edb | 2014-01-12 21:38:32 +0800 | [diff] [blame] | 3355 | if (btrfs_fs_incompat(fs_info, SKINNY_METADATA)) |
| 3356 | key.type = BTRFS_METADATA_ITEM_KEY; |
| 3357 | else |
| 3358 | key.type = BTRFS_EXTENT_ITEM_KEY; |
Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 3359 | key.objectid = logical; |
Liu Bo | 625f1c8d | 2013-04-27 02:56:57 +0000 | [diff] [blame] | 3360 | key.offset = (u64)-1; |
Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 3361 | |
| 3362 | ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); |
| 3363 | if (ret < 0) |
| 3364 | goto out; |
Josef Bacik | 3173a18 | 2013-03-07 14:22:04 -0500 | [diff] [blame] | 3365 | |
Arne Jansen | 8c51032 | 2011-06-03 10:09:26 +0200 | [diff] [blame] | 3366 | if (ret > 0) { |
Wang Shilong | ade2e0b | 2014-01-12 21:38:33 +0800 | [diff] [blame] | 3367 | ret = btrfs_previous_extent_item(root, path, 0); |
Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 3368 | if (ret < 0) |
| 3369 | goto out; |
Arne Jansen | 8c51032 | 2011-06-03 10:09:26 +0200 | [diff] [blame] | 3370 | if (ret > 0) { |
| 3371 | /* there's no smaller item, so stick with the |
| 3372 | * larger one */ |
| 3373 | btrfs_release_path(path); |
| 3374 | ret = btrfs_search_slot(NULL, root, &key, |
| 3375 | path, 0, 0); |
| 3376 | if (ret < 0) |
| 3377 | goto out; |
| 3378 | } |
Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 3379 | } |
| 3380 | |
Liu Bo | 625f1c8d | 2013-04-27 02:56:57 +0000 | [diff] [blame] | 3381 | stop_loop = 0; |
Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 3382 | while (1) { |
Josef Bacik | 3173a18 | 2013-03-07 14:22:04 -0500 | [diff] [blame] | 3383 | u64 bytes; |
| 3384 | |
Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 3385 | l = path->nodes[0]; |
| 3386 | slot = path->slots[0]; |
| 3387 | if (slot >= btrfs_header_nritems(l)) { |
| 3388 | ret = btrfs_next_leaf(root, path); |
| 3389 | if (ret == 0) |
| 3390 | continue; |
| 3391 | if (ret < 0) |
| 3392 | goto out; |
| 3393 | |
Liu Bo | 625f1c8d | 2013-04-27 02:56:57 +0000 | [diff] [blame] | 3394 | stop_loop = 1; |
Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 3395 | break; |
| 3396 | } |
| 3397 | btrfs_item_key_to_cpu(l, &key, slot); |
| 3398 | |
Zhao Lei | d7cad23 | 2015-07-22 13:14:48 +0800 | [diff] [blame] | 3399 | if (key.type != BTRFS_EXTENT_ITEM_KEY && |
| 3400 | key.type != BTRFS_METADATA_ITEM_KEY) |
| 3401 | goto next; |
| 3402 | |
Josef Bacik | 3173a18 | 2013-03-07 14:22:04 -0500 | [diff] [blame] | 3403 | if (key.type == BTRFS_METADATA_ITEM_KEY) |
Jeff Mahoney | 0b246af | 2016-06-22 18:54:23 -0400 | [diff] [blame] | 3404 | bytes = fs_info->nodesize; |
Josef Bacik | 3173a18 | 2013-03-07 14:22:04 -0500 | [diff] [blame] | 3405 | else |
| 3406 | bytes = key.offset; |
| 3407 | |
| 3408 | if (key.objectid + bytes <= logical) |
Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 3409 | goto next; |
| 3410 | |
Liu Bo | 625f1c8d | 2013-04-27 02:56:57 +0000 | [diff] [blame] | 3411 | if (key.objectid >= logical + map->stripe_len) { |
| 3412 | /* out of this device extent */ |
| 3413 | if (key.objectid >= logic_end) |
| 3414 | stop_loop = 1; |
| 3415 | break; |
| 3416 | } |
Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 3417 | |
Filipe Manana | 2473d24 | 2020-05-08 11:01:10 +0100 | [diff] [blame] | 3418 | /* |
| 3419 | * If our block group was removed in the meanwhile, just |
| 3420 | * stop scrubbing since there is no point in continuing. |
| 3421 | * Continuing would prevent reusing its device extents |
| 3422 | * for new block groups for a long time. |
| 3423 | */ |
| 3424 | spin_lock(&cache->lock); |
| 3425 | if (cache->removed) { |
| 3426 | spin_unlock(&cache->lock); |
| 3427 | ret = 0; |
| 3428 | goto out; |
| 3429 | } |
| 3430 | spin_unlock(&cache->lock); |
| 3431 | |
Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 3432 | extent = btrfs_item_ptr(l, slot, |
| 3433 | struct btrfs_extent_item); |
| 3434 | flags = btrfs_extent_flags(l, extent); |
| 3435 | generation = btrfs_extent_generation(l, extent); |
| 3436 | |
Zhao Lei | a323e81 | 2015-07-23 12:29:49 +0800 | [diff] [blame] | 3437 | if ((flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) && |
| 3438 | (key.objectid < logical || |
| 3439 | key.objectid + bytes > |
| 3440 | logical + map->stripe_len)) { |
Frank Holton | efe120a | 2013-12-20 11:37:06 -0500 | [diff] [blame] | 3441 | btrfs_err(fs_info, |
Jeff Mahoney | 5d163e0 | 2016-09-20 10:05:00 -0400 | [diff] [blame] | 3442 | "scrub: tree block %llu spanning stripes, ignored. logical=%llu", |
Geert Uytterhoeven | c1c9ff7 | 2013-08-20 13:20:07 +0200 | [diff] [blame] | 3443 | key.objectid, logical); |
Zhao Lei | 9799d2c3 | 2015-08-25 21:31:40 +0800 | [diff] [blame] | 3444 | spin_lock(&sctx->stat_lock); |
| 3445 | sctx->stat.uncorrectable_errors++; |
| 3446 | spin_unlock(&sctx->stat_lock); |
Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 3447 | goto next; |
| 3448 | } |
| 3449 | |
Liu Bo | 625f1c8d | 2013-04-27 02:56:57 +0000 | [diff] [blame] | 3450 | again: |
| 3451 | extent_logical = key.objectid; |
Qu Wenruo | fa485d2 | 2020-12-02 14:48:07 +0800 | [diff] [blame] | 3452 | ASSERT(bytes <= U32_MAX); |
Liu Bo | 625f1c8d | 2013-04-27 02:56:57 +0000 | [diff] [blame] | 3453 | extent_len = bytes; |
| 3454 | |
Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 3455 | /* |
| 3456 | * trim extent to this stripe |
| 3457 | */ |
Liu Bo | 625f1c8d | 2013-04-27 02:56:57 +0000 | [diff] [blame] | 3458 | if (extent_logical < logical) { |
| 3459 | extent_len -= logical - extent_logical; |
| 3460 | extent_logical = logical; |
Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 3461 | } |
Liu Bo | 625f1c8d | 2013-04-27 02:56:57 +0000 | [diff] [blame] | 3462 | if (extent_logical + extent_len > |
Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 3463 | logical + map->stripe_len) { |
Liu Bo | 625f1c8d | 2013-04-27 02:56:57 +0000 | [diff] [blame] | 3464 | extent_len = logical + map->stripe_len - |
| 3465 | extent_logical; |
Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 3466 | } |
| 3467 | |
Liu Bo | 625f1c8d | 2013-04-27 02:56:57 +0000 | [diff] [blame] | 3468 | extent_physical = extent_logical - logical + physical; |
Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 3469 | extent_dev = scrub_dev; |
| 3470 | extent_mirror_num = mirror_num; |
Omar Sandoval | 3293428 | 2018-08-14 11:09:52 -0700 | [diff] [blame] | 3471 | if (sctx->is_dev_replace) |
Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 3472 | scrub_remap_extent(fs_info, extent_logical, |
| 3473 | extent_len, &extent_physical, |
| 3474 | &extent_dev, |
| 3475 | &extent_mirror_num); |
Liu Bo | 625f1c8d | 2013-04-27 02:56:57 +0000 | [diff] [blame] | 3476 | |
Filipe Manana | 8949030 | 2020-05-08 11:02:07 +0100 | [diff] [blame] | 3477 | if (flags & BTRFS_EXTENT_FLAG_DATA) { |
| 3478 | ret = btrfs_lookup_csums_range(csum_root, |
| 3479 | extent_logical, |
| 3480 | extent_logical + extent_len - 1, |
| 3481 | &sctx->csum_list, 1); |
| 3482 | if (ret) |
| 3483 | goto out; |
| 3484 | } |
Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 3485 | |
Liu Bo | 6ca1765 | 2018-03-07 12:08:09 -0700 | [diff] [blame] | 3486 | ret = scrub_extent(sctx, map, extent_logical, extent_len, |
Liu Bo | 625f1c8d | 2013-04-27 02:56:57 +0000 | [diff] [blame] | 3487 | extent_physical, extent_dev, flags, |
| 3488 | generation, extent_mirror_num, |
Stefan Behrens | 115930c | 2013-07-04 16:14:23 +0200 | [diff] [blame] | 3489 | extent_logical - logical + physical); |
Zhao Lei | 6fa96d7 | 2015-07-21 12:22:30 +0800 | [diff] [blame] | 3490 | |
| 3491 | scrub_free_csums(sctx); |
| 3492 | |
Liu Bo | 625f1c8d | 2013-04-27 02:56:57 +0000 | [diff] [blame] | 3493 | if (ret) |
| 3494 | goto out; |
| 3495 | |
Naohiro Aota | de17add | 2021-02-04 19:22:13 +0900 | [diff] [blame] | 3496 | if (sctx->is_dev_replace) |
| 3497 | sync_replace_for_zoned(sctx); |
| 3498 | |
Liu Bo | 625f1c8d | 2013-04-27 02:56:57 +0000 | [diff] [blame] | 3499 | if (extent_logical + extent_len < |
| 3500 | key.objectid + bytes) { |
Zhao Lei | ffe2d20 | 2015-01-20 15:11:44 +0800 | [diff] [blame] | 3501 | if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) { |
Wang Shilong | 3b080b2 | 2014-04-01 18:01:43 +0800 | [diff] [blame] | 3502 | /* |
| 3503 | * Loop until we find the next data stripe |
| 3504 | * or we have finished all stripes. |
| 3505 | */ |
Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 3506 | loop: |
| 3507 | physical += map->stripe_len; |
| 3508 | ret = get_raid56_logic_offset(physical, |
| 3509 | num, map, &logical, |
| 3510 | &stripe_logical); |
| 3511 | logical += base; |
| 3512 | |
| 3513 | if (ret && physical < physical_end) { |
| 3514 | stripe_logical += base; |
| 3515 | stripe_end = stripe_logical + |
Zhao Lei | a0dd59d | 2015-07-21 15:42:26 +0800 | [diff] [blame] | 3516 | increment; |
Miao Xie | 5a6ac9e | 2014-11-06 17:20:58 +0800 | [diff] [blame] | 3517 | ret = scrub_raid56_parity(sctx, |
| 3518 | map, scrub_dev, ppath, |
| 3519 | stripe_logical, |
| 3520 | stripe_end); |
| 3521 | if (ret) |
| 3522 | goto out; |
| 3523 | goto loop; |
| 3524 | } |
Wang Shilong | 3b080b2 | 2014-04-01 18:01:43 +0800 | [diff] [blame] | 3525 | } else { |
| 3526 | physical += map->stripe_len; |
| 3527 | logical += increment; |
| 3528 | } |
Liu Bo | 625f1c8d | 2013-04-27 02:56:57 +0000 | [diff] [blame] | 3529 | if (logical < key.objectid + bytes) { |
| 3530 | cond_resched(); |
| 3531 | goto again; |
| 3532 | } |
| 3533 | |
Wang Shilong | 3b080b2 | 2014-04-01 18:01:43 +0800 | [diff] [blame] | 3534 | if (physical >= physical_end) { |
Liu Bo | 625f1c8d | 2013-04-27 02:56:57 +0000 | [diff] [blame] | 3535 | stop_loop = 1; |
| 3536 | break; |
| 3537 | } |
| 3538 | } |
Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 3539 | next: |
| 3540 | path->slots[0]++; |
| 3541 | } |
Chris Mason | 7126733 | 2011-05-23 06:30:52 -0400 | [diff] [blame] | 3542 | btrfs_release_path(path); |
Wang Shilong | 3b080b2 | 2014-04-01 18:01:43 +0800 | [diff] [blame] | 3543 | skip: |
Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 3544 | logical += increment; |
| 3545 | physical += map->stripe_len; |
Stefan Behrens | d9d181c | 2012-11-02 09:58:09 +0100 | [diff] [blame] | 3546 | spin_lock(&sctx->stat_lock); |
Liu Bo | 625f1c8d | 2013-04-27 02:56:57 +0000 | [diff] [blame] | 3547 | if (stop_loop) |
| 3548 | sctx->stat.last_physical = map->stripes[num].physical + |
| 3549 | length; |
| 3550 | else |
| 3551 | sctx->stat.last_physical = physical; |
Stefan Behrens | d9d181c | 2012-11-02 09:58:09 +0100 | [diff] [blame] | 3552 | spin_unlock(&sctx->stat_lock); |
Liu Bo | 625f1c8d | 2013-04-27 02:56:57 +0000 | [diff] [blame] | 3553 | if (stop_loop) |
| 3554 | break; |
Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 3555 | } |
Stefan Behrens | ff023aa | 2012-11-06 11:43:11 +0100 | [diff] [blame] | 3556 | out: |
Arne Jansen | a2de733 | 2011-03-08 14:14:00 +0100 | [diff] [blame] | 3557 | /* push queued extents */ |
Stefan Behrens | d9d181c | 2012-11-02 09:58:09 +0100 | [diff] [blame] | 3558 | scrub_submit(sctx); |
	mutex_lock(&sctx->wr_lock);
	scrub_wr_submit(sctx);
	mutex_unlock(&sctx->wr_lock);

	blk_finish_plug(&plug);
	btrfs_free_path(path);
	btrfs_free_path(ppath);

	if (sctx->is_dev_replace && ret >= 0) {
		int ret2;

		ret2 = sync_write_pointer_for_zoned(sctx, base + offset,
						    map->stripes[num].physical,
						    physical_end);
		if (ret2)
			ret = ret2;
	}

	return ret < 0 ? ret : 0;
}

static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx,
					  struct btrfs_device *scrub_dev,
					  u64 chunk_offset, u64 length,
					  u64 dev_offset,
					  struct btrfs_block_group *cache)
{
	struct btrfs_fs_info *fs_info = sctx->fs_info;
	struct extent_map_tree *map_tree = &fs_info->mapping_tree;
	struct map_lookup *map;
	struct extent_map *em;
	int i;
	int ret = 0;

	read_lock(&map_tree->lock);
	em = lookup_extent_mapping(map_tree, chunk_offset, 1);
	read_unlock(&map_tree->lock);

	if (!em) {
		/*
		 * Might have been an unused block group deleted by the cleaner
		 * kthread or relocation.
		 */
		spin_lock(&cache->lock);
		if (!cache->removed)
			ret = -EINVAL;
		spin_unlock(&cache->lock);

		return ret;
	}

	map = em->map_lookup;
	if (em->start != chunk_offset)
		goto out;

	if (em->len < length)
		goto out;

	for (i = 0; i < map->num_stripes; ++i) {
		if (map->stripes[i].dev->bdev == scrub_dev->bdev &&
		    map->stripes[i].physical == dev_offset) {
			ret = scrub_stripe(sctx, map, scrub_dev, i,
					   chunk_offset, length, cache);
			if (ret)
				goto out;
		}
	}
out:
	free_extent_map(em);

	return ret;
}
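
/*
 * Added note (not part of the original file): a chunk can place more than
 * one stripe on the same device (e.g. the DUP profile), which is why
 * scrub_chunk() matches both the bdev and the physical offset above rather
 * than stopping at the first stripe found on @scrub_dev. A hedged sketch of
 * how the enumeration loop further below invokes it, passing the dev
 * extent's physical start as @dev_offset:
 */
#if 0
	ret = scrub_chunk(sctx, scrub_dev, chunk_offset, length,
			  found_key.offset /* dev_offset */, cache);
#endif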

static int finish_extent_writes_for_zoned(struct btrfs_root *root,
					  struct btrfs_block_group *cache)
{
	struct btrfs_fs_info *fs_info = cache->fs_info;
	struct btrfs_trans_handle *trans;

	if (!btrfs_is_zoned(fs_info))
		return 0;

	btrfs_wait_block_group_reservations(cache);
	btrfs_wait_nocow_writers(cache);
	btrfs_wait_ordered_roots(fs_info, U64_MAX, cache->start, cache->length);

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans))
		return PTR_ERR(trans);
	return btrfs_commit_transaction(trans);
}
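
/*
 * Added note (not part of the original file): the helper above first drains
 * everything that could still dirty the block group (reservations, nocow
 * writers, ordered extents), then uses the join + commit idiom to make sure
 * the running transaction reaches disk. A minimal sketch of that idiom:
 */
#if 0
static int example_flush_running_transaction(struct btrfs_root *root)
{
	struct btrfs_trans_handle *trans;

	trans = btrfs_join_transaction(root);	/* attach to (or start) a transaction */
	if (IS_ERR(trans))
		return PTR_ERR(trans);
	return btrfs_commit_transaction(trans);	/* commit and wait for it */
}
#endif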

static noinline_for_stack
int scrub_enumerate_chunks(struct scrub_ctx *sctx,
			   struct btrfs_device *scrub_dev, u64 start, u64 end)
{
	struct btrfs_dev_extent *dev_extent = NULL;
	struct btrfs_path *path;
	struct btrfs_fs_info *fs_info = sctx->fs_info;
	struct btrfs_root *root = fs_info->dev_root;
	u64 length;
	u64 chunk_offset;
	int ret = 0;
	int ro_set;
	int slot;
	struct extent_buffer *l;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_block_group *cache;
	struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->reada = READA_FORWARD;
	path->search_commit_root = 1;
	path->skip_locking = 1;

	key.objectid = scrub_dev->devid;
	key.offset = 0ull;
	key.type = BTRFS_DEV_EXTENT_KEY;

	while (1) {
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			break;
		if (ret > 0) {
			if (path->slots[0] >=
			    btrfs_header_nritems(path->nodes[0])) {
				ret = btrfs_next_leaf(root, path);
				if (ret < 0)
					break;
				if (ret > 0) {
					ret = 0;
					break;
				}
			} else {
				ret = 0;
			}
		}

		l = path->nodes[0];
		slot = path->slots[0];

		btrfs_item_key_to_cpu(l, &found_key, slot);

		if (found_key.objectid != scrub_dev->devid)
			break;

		if (found_key.type != BTRFS_DEV_EXTENT_KEY)
			break;

		if (found_key.offset >= end)
			break;

		if (found_key.offset < key.offset)
			break;

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		length = btrfs_dev_extent_length(l, dev_extent);

		if (found_key.offset + length <= start)
			goto skip;

		chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);

		/*
		 * Get a reference on the corresponding block group to prevent
		 * the chunk from going away while we scrub it.
		 */
		cache = btrfs_lookup_block_group(fs_info, chunk_offset);
		/*
		 * Some chunks are removed but not committed to disk yet,
		 * continue scrubbing.
		 */
		if (!cache)
			goto skip;

		if (sctx->is_dev_replace && btrfs_is_zoned(fs_info)) {
			spin_lock(&cache->lock);
			if (!cache->to_copy) {
				spin_unlock(&cache->lock);
				btrfs_put_block_group(cache);
				goto skip;
			}
			spin_unlock(&cache->lock);
		}

		/*
		 * Make sure that while we are scrubbing the corresponding block
		 * group doesn't get its logical address and its device extents
		 * reused for another block group, which can possibly be of a
		 * different type and different profile. We do this to prevent
		 * false error detections and crashes due to bogus attempts to
		 * repair extents.
		 */
		spin_lock(&cache->lock);
		if (cache->removed) {
			spin_unlock(&cache->lock);
			btrfs_put_block_group(cache);
			goto skip;
		}
		btrfs_freeze_block_group(cache);
		spin_unlock(&cache->lock);

		/*
		 * We need to call btrfs_inc_block_group_ro() with scrubs_paused,
		 * to avoid a deadlock caused by:
		 * btrfs_inc_block_group_ro()
		 * -> btrfs_wait_for_commit()
		 * -> btrfs_commit_transaction()
		 * -> btrfs_scrub_pause()
		 */
		scrub_pause_on(fs_info);

		/*
		 * Don't do chunk preallocation for scrub.
		 *
		 * This is especially important for SYSTEM bgs, or we can hit
		 * -EFBIG from btrfs_finish_chunk_alloc() like:
		 * 1. The only SYSTEM bg is marked RO.
		 *    Since SYSTEM bg is small, that's pretty common.
		 * 2. A new SYSTEM bg is allocated, because the regular
		 *    version would allocate a new chunk.
		 * 3. The new SYSTEM bg is empty and will get cleaned up.
		 *    Before the cleanup really happens, it's marked RO again.
		 * 4. The empty SYSTEM bg gets scrubbed.
		 *    We go back to 2.
		 *
		 * This can easily boost the number of SYSTEM chunks if the
		 * cleaner thread can't be triggered fast enough, and use up
		 * all the space of btrfs_super_block::sys_chunk_array.
		 *
		 * While for dev replace, we need to try our best to mark the
		 * block group RO, to prevent a race between:
		 * - Write duplication
		 *   Contains the latest data
		 * - Scrub copy
		 *   Contains data from the commit tree
		 *
		 * If the target block group is not marked RO, nocow writes can
		 * be overwritten by the scrub copy, causing data corruption.
		 * So for dev-replace, it's not allowed to continue if a block
		 * group is not RO.
		 */
		ret = btrfs_inc_block_group_ro(cache, sctx->is_dev_replace);
		if (!ret && sctx->is_dev_replace) {
			ret = finish_extent_writes_for_zoned(root, cache);
			if (ret) {
				btrfs_dec_block_group_ro(cache);
				scrub_pause_off(fs_info);
				btrfs_put_block_group(cache);
				break;
			}
		}

		if (ret == 0) {
			ro_set = 1;
		} else if (ret == -ENOSPC && !sctx->is_dev_replace) {
			/*
			 * btrfs_inc_block_group_ro() returns -ENOSPC when it
			 * failed to create a new chunk for metadata.
			 * That is not a problem for scrub, because metadata
			 * is always COWed, and our scrub paused
			 * commit_transactions.
			 */
			ro_set = 0;
		} else if (ret == -ETXTBSY) {
			btrfs_warn(fs_info,
		   "skipping scrub of block group %llu due to active swapfile",
				   cache->start);
			scrub_pause_off(fs_info);
			ret = 0;
			goto skip_unfreeze;
		} else {
			btrfs_warn(fs_info,
				   "failed setting block group ro: %d", ret);
			btrfs_unfreeze_block_group(cache);
			btrfs_put_block_group(cache);
			scrub_pause_off(fs_info);
			break;
		}

		/*
		 * Now the target block is marked RO, wait for nocow writes to
		 * finish before dev-replace.
		 * COW is fine, as COW never overwrites extents in commit tree.
		 */
		if (sctx->is_dev_replace) {
			btrfs_wait_nocow_writers(cache);
			btrfs_wait_ordered_roots(fs_info, U64_MAX, cache->start,
						 cache->length);
		}

		scrub_pause_off(fs_info);
		down_write(&dev_replace->rwsem);
		dev_replace->cursor_right = found_key.offset + length;
		dev_replace->cursor_left = found_key.offset;
		dev_replace->item_needs_writeback = 1;
		up_write(&dev_replace->rwsem);

		ret = scrub_chunk(sctx, scrub_dev, chunk_offset, length,
				  found_key.offset, cache);

		/*
		 * Flush and submit all pending read and write bios, then wait
		 * for them.
		 * Note that in the dev replace case, a read request causes
		 * write requests that are submitted in the read completion
		 * worker. Therefore in the current situation, it is required
		 * that all write requests are flushed, so that all read and
		 * write requests are really completed when bios_in_flight
		 * changes to 0.
		 */
		sctx->flush_all_writes = true;
		scrub_submit(sctx);
		mutex_lock(&sctx->wr_lock);
		scrub_wr_submit(sctx);
		mutex_unlock(&sctx->wr_lock);

		wait_event(sctx->list_wait,
			   atomic_read(&sctx->bios_in_flight) == 0);

		scrub_pause_on(fs_info);

		/*
		 * This must be called before we decrease @scrub_paused.
		 * Make sure we don't block transaction commit while we are
		 * waiting for pending workers to finish.
		 */
		wait_event(sctx->list_wait,
			   atomic_read(&sctx->workers_pending) == 0);
		sctx->flush_all_writes = false;

		scrub_pause_off(fs_info);

		if (sctx->is_dev_replace &&
		    !btrfs_finish_block_group_to_copy(dev_replace->srcdev,
						      cache, found_key.offset))
			ro_set = 0;

		down_write(&dev_replace->rwsem);
		dev_replace->cursor_left = dev_replace->cursor_right;
		dev_replace->item_needs_writeback = 1;
		up_write(&dev_replace->rwsem);

		if (ro_set)
			btrfs_dec_block_group_ro(cache);

		/*
		 * We might have prevented the cleaner kthread from deleting
		 * this block group if it was already unused because we raced
		 * and set it to RO mode first. So add it back to the unused
		 * list, otherwise it might not ever be deleted unless a manual
		 * balance is triggered or it becomes used and unused again.
		 */
		spin_lock(&cache->lock);
		if (!cache->removed && !cache->ro && cache->reserved == 0 &&
		    cache->used == 0) {
			spin_unlock(&cache->lock);
			if (btrfs_test_opt(fs_info, DISCARD_ASYNC))
				btrfs_discard_queue_work(&fs_info->discard_ctl,
							 cache);
			else
				btrfs_mark_bg_unused(cache);
		} else {
			spin_unlock(&cache->lock);
		}
skip_unfreeze:
		btrfs_unfreeze_block_group(cache);
		btrfs_put_block_group(cache);
		if (ret)
			break;
		if (sctx->is_dev_replace &&
		    atomic64_read(&dev_replace->num_write_errors) > 0) {
			ret = -EIO;
			break;
		}
		if (sctx->stat.malloc_errors > 0) {
			ret = -ENOMEM;
			break;
		}
skip:
		key.offset = found_key.offset + length;
		btrfs_release_path(path);
	}

	btrfs_free_path(path);

	return ret;
}
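
/*
 * Added note (not part of the original file): dev extent items are keyed by
 * (devid, BTRFS_DEV_EXTENT_KEY, physical offset), so the walk above simply
 * advances key.offset past each extent it has processed. A hedged sketch of
 * the search key setup for one device:
 */
#if 0
	struct btrfs_key key = {
		.objectid = scrub_dev->devid,	/* which device to walk */
		.type = BTRFS_DEV_EXTENT_KEY,	/* dev extent items only */
		.offset = 0ull,			/* physical offset, from the start */
	};
#endif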

static noinline_for_stack int scrub_supers(struct scrub_ctx *sctx,
					   struct btrfs_device *scrub_dev)
{
	int i;
	u64 bytenr;
	u64 gen;
	int ret;
	struct btrfs_fs_info *fs_info = sctx->fs_info;

	if (BTRFS_FS_ERROR(fs_info))
		return -EROFS;

	/* Seed devices of a new filesystem have their own generation. */
	if (scrub_dev->fs_devices != fs_info->fs_devices)
		gen = scrub_dev->generation;
	else
		gen = fs_info->last_trans_committed;

	for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
		bytenr = btrfs_sb_offset(i);
		if (bytenr + BTRFS_SUPER_INFO_SIZE >
		    scrub_dev->commit_total_bytes)
			break;
		if (!btrfs_check_super_location(scrub_dev, bytenr))
			continue;

		ret = scrub_pages(sctx, bytenr, BTRFS_SUPER_INFO_SIZE, bytenr,
				  scrub_dev, BTRFS_EXTENT_FLAG_SUPER, gen, i,
				  NULL, bytenr);
		if (ret)
			return ret;
	}
	wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);

	return 0;
}
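
/*
 * Added note (not part of the original file): btrfs keeps up to
 * BTRFS_SUPER_MIRROR_MAX (3) super block copies per device; assuming the
 * usual definition of btrfs_sb_offset(), those sit at 64KiB, 256MiB and
 * 1TiB. The bounds check above skips copies that would fall beyond
 * commit_total_bytes, so small devices simply have fewer mirrors scrubbed.
 */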

static void scrub_workers_put(struct btrfs_fs_info *fs_info)
{
	if (refcount_dec_and_mutex_lock(&fs_info->scrub_workers_refcnt,
					&fs_info->scrub_lock)) {
		struct btrfs_workqueue *scrub_workers = NULL;
		struct btrfs_workqueue *scrub_wr_comp = NULL;
		struct btrfs_workqueue *scrub_parity = NULL;

		scrub_workers = fs_info->scrub_workers;
		scrub_wr_comp = fs_info->scrub_wr_completion_workers;
		scrub_parity = fs_info->scrub_parity_workers;

		fs_info->scrub_workers = NULL;
		fs_info->scrub_wr_completion_workers = NULL;
		fs_info->scrub_parity_workers = NULL;
		mutex_unlock(&fs_info->scrub_lock);

		btrfs_destroy_workqueue(scrub_workers);
		btrfs_destroy_workqueue(scrub_wr_comp);
		btrfs_destroy_workqueue(scrub_parity);
	}
}
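
/*
 * Added note (not part of the original file): refcount_dec_and_mutex_lock()
 * takes the mutex only when the count drops to zero, so the last caller
 * tears the queues down while everyone else merely decrements. Moving the
 * pointers to locals before unlocking keeps the potentially sleeping
 * destruction outside scrub_lock. The same idiom in miniature, with
 * hypothetical names:
 */
#if 0
	if (refcount_dec_and_mutex_lock(&obj->refs, &obj->lock)) {
		struct workqueue_struct *wq = obj->wq;	/* hypothetical field */

		obj->wq = NULL;
		mutex_unlock(&obj->lock);
		destroy_workqueue(wq);			/* may sleep */
	}
#endif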

/*
 * Get a reference count on fs_info->scrub_workers. Start the workers if
 * necessary.
 */
static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info,
						int is_dev_replace)
{
	struct btrfs_workqueue *scrub_workers = NULL;
	struct btrfs_workqueue *scrub_wr_comp = NULL;
	struct btrfs_workqueue *scrub_parity = NULL;
	unsigned int flags = WQ_FREEZABLE | WQ_UNBOUND;
	int max_active = fs_info->thread_pool_size;
	int ret = -ENOMEM;

	if (refcount_inc_not_zero(&fs_info->scrub_workers_refcnt))
		return 0;

	scrub_workers = btrfs_alloc_workqueue(fs_info, "scrub", flags,
					      is_dev_replace ? 1 : max_active, 4);
	if (!scrub_workers)
		goto fail_scrub_workers;

	scrub_wr_comp = btrfs_alloc_workqueue(fs_info, "scrubwrc", flags,
					      max_active, 2);
	if (!scrub_wr_comp)
		goto fail_scrub_wr_completion_workers;

	scrub_parity = btrfs_alloc_workqueue(fs_info, "scrubparity", flags,
					     max_active, 2);
	if (!scrub_parity)
		goto fail_scrub_parity_workers;

	mutex_lock(&fs_info->scrub_lock);
	if (refcount_read(&fs_info->scrub_workers_refcnt) == 0) {
		ASSERT(fs_info->scrub_workers == NULL &&
		       fs_info->scrub_wr_completion_workers == NULL &&
		       fs_info->scrub_parity_workers == NULL);
		fs_info->scrub_workers = scrub_workers;
		fs_info->scrub_wr_completion_workers = scrub_wr_comp;
		fs_info->scrub_parity_workers = scrub_parity;
		refcount_set(&fs_info->scrub_workers_refcnt, 1);
		mutex_unlock(&fs_info->scrub_lock);
		return 0;
	}
	/* Other thread raced in and created the workers for us */
	refcount_inc(&fs_info->scrub_workers_refcnt);
	mutex_unlock(&fs_info->scrub_lock);

	ret = 0;
	btrfs_destroy_workqueue(scrub_parity);
fail_scrub_parity_workers:
	btrfs_destroy_workqueue(scrub_wr_comp);
fail_scrub_wr_completion_workers:
	btrfs_destroy_workqueue(scrub_workers);
fail_scrub_workers:
	return ret;
}
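
/*
 * Added note (not part of the original file): the getter first tries a
 * lockless refcount_inc_not_zero(), allocates the workqueues outside the
 * lock only when the count was zero, and then re-checks under scrub_lock.
 * If another thread won the race in between, the freshly allocated queues
 * are destroyed through the fail labels with ret == 0, which is why the
 * success path deliberately falls through into them.
 */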

int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
		    u64 end, struct btrfs_scrub_progress *progress,
		    int readonly, int is_dev_replace)
{
	struct btrfs_dev_lookup_args args = { .devid = devid };
	struct scrub_ctx *sctx;
	int ret;
	struct btrfs_device *dev;
	unsigned int nofs_flag;

	if (btrfs_fs_closing(fs_info))
		return -EAGAIN;

	if (fs_info->nodesize > BTRFS_STRIPE_LEN) {
		/*
		 * In this case scrub is unable to calculate the checksum
		 * due to the way scrub is implemented. Do not handle this
		 * situation at all because it won't ever happen.
		 */
		btrfs_err(fs_info,
			  "scrub: size assumption nodesize <= BTRFS_STRIPE_LEN (%d <= %d) fails",
			  fs_info->nodesize,
			  BTRFS_STRIPE_LEN);
		return -EINVAL;
	}

	if (fs_info->nodesize >
	    PAGE_SIZE * SCRUB_MAX_PAGES_PER_BLOCK ||
	    fs_info->sectorsize > PAGE_SIZE * SCRUB_MAX_PAGES_PER_BLOCK) {
		/*
		 * Would exhaust the array bounds of the pagev member in
		 * struct scrub_block.
		 */
		btrfs_err(fs_info,
			  "scrub: size assumption nodesize and sectorsize <= SCRUB_MAX_PAGES_PER_BLOCK (%d <= %d && %d <= %d) fails",
			  fs_info->nodesize,
			  SCRUB_MAX_PAGES_PER_BLOCK,
			  fs_info->sectorsize,
			  SCRUB_MAX_PAGES_PER_BLOCK);
		return -EINVAL;
	}

	/* Allocate outside of device_list_mutex */
	sctx = scrub_setup_ctx(fs_info, is_dev_replace);
	if (IS_ERR(sctx))
		return PTR_ERR(sctx);

	ret = scrub_workers_get(fs_info, is_dev_replace);
	if (ret)
		goto out_free_ctx;

	mutex_lock(&fs_info->fs_devices->device_list_mutex);
	dev = btrfs_find_device(fs_info->fs_devices, &args);
	if (!dev || (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state) &&
		     !is_dev_replace)) {
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		ret = -ENODEV;
		goto out;
	}

	if (!is_dev_replace && !readonly &&
	    !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state)) {
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		btrfs_err_in_rcu(fs_info,
			"scrub on devid %llu: filesystem on %s is not writable",
				 devid, rcu_str_deref(dev->name));
		ret = -EROFS;
		goto out;
	}

	mutex_lock(&fs_info->scrub_lock);
	if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) ||
	    test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &dev->dev_state)) {
		mutex_unlock(&fs_info->scrub_lock);
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		ret = -EIO;
		goto out;
	}

	down_read(&fs_info->dev_replace.rwsem);
	if (dev->scrub_ctx ||
	    (!is_dev_replace &&
	     btrfs_dev_replace_is_ongoing(&fs_info->dev_replace))) {
		up_read(&fs_info->dev_replace.rwsem);
		mutex_unlock(&fs_info->scrub_lock);
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		ret = -EINPROGRESS;
		goto out;
	}
	up_read(&fs_info->dev_replace.rwsem);

	sctx->readonly = readonly;
	dev->scrub_ctx = sctx;
	mutex_unlock(&fs_info->fs_devices->device_list_mutex);

	/*
	 * By checking @scrub_pause_req here, we can avoid a race between
	 * committing a transaction and scrubbing.
	 */
	__scrub_blocked_if_needed(fs_info);
	atomic_inc(&fs_info->scrubs_running);
	mutex_unlock(&fs_info->scrub_lock);

	/*
	 * In order to avoid deadlock with reclaim when there is a transaction
	 * trying to pause scrub, make sure we use GFP_NOFS for all the
	 * allocations done at btrfs_scrub_pages() and scrub_pages_for_parity()
	 * invoked by our callees. The pausing request is done when the
	 * transaction commit starts, and it blocks the transaction until scrub
	 * is paused (done at specific points at scrub_stripe() or right above
	 * before incrementing fs_info->scrubs_running).
	 */
	nofs_flag = memalloc_nofs_save();
	if (!is_dev_replace) {
		btrfs_info(fs_info, "scrub: started on devid %llu", devid);
		/*
		 * By holding the device list mutex, we serialize against the
		 * super block writes kicked off by a log tree sync.
		 */
		mutex_lock(&fs_info->fs_devices->device_list_mutex);
		ret = scrub_supers(sctx, dev);
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
	}

	if (!ret)
		ret = scrub_enumerate_chunks(sctx, dev, start, end);
	memalloc_nofs_restore(nofs_flag);

	wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);
	atomic_dec(&fs_info->scrubs_running);
	wake_up(&fs_info->scrub_pause_wait);

	wait_event(sctx->list_wait, atomic_read(&sctx->workers_pending) == 0);

	if (progress)
		memcpy(progress, &sctx->stat, sizeof(*progress));

	if (!is_dev_replace)
		btrfs_info(fs_info, "scrub: %s on devid %llu with status: %d",
			   ret ? "not finished" : "finished", devid, ret);

	mutex_lock(&fs_info->scrub_lock);
	dev->scrub_ctx = NULL;
	mutex_unlock(&fs_info->scrub_lock);

	scrub_workers_put(fs_info);
	scrub_put_ctx(sctx);

	return ret;
out:
	scrub_workers_put(fs_info);
out_free_ctx:
	scrub_free_ctx(sctx);

	return ret;
}
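
/*
 * Hedged usage sketch (added; not part of the original file): a read-only
 * scrub of one whole device could be driven like this, assuming the caller
 * has already resolved the devid:
 */
#if 0
static int example_readonly_scrub(struct btrfs_fs_info *fs_info, u64 devid)
{
	struct btrfs_scrub_progress progress = { 0 };
	int ret;

	/* Whole device range, read-only, not a dev-replace. */
	ret = btrfs_scrub_dev(fs_info, devid, 0, U64_MAX, &progress, 1, 0);
	if (!ret)
		btrfs_info(fs_info, "scrubbed %llu data bytes, %llu csum errors",
			   progress.data_bytes_scrubbed, progress.csum_errors);
	return ret;
}
#endif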

void btrfs_scrub_pause(struct btrfs_fs_info *fs_info)
{
	mutex_lock(&fs_info->scrub_lock);
	atomic_inc(&fs_info->scrub_pause_req);
	while (atomic_read(&fs_info->scrubs_paused) !=
	       atomic_read(&fs_info->scrubs_running)) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
			   atomic_read(&fs_info->scrubs_paused) ==
			   atomic_read(&fs_info->scrubs_running));
		mutex_lock(&fs_info->scrub_lock);
	}
	mutex_unlock(&fs_info->scrub_lock);
}

void btrfs_scrub_continue(struct btrfs_fs_info *fs_info)
{
	atomic_dec(&fs_info->scrub_pause_req);
	wake_up(&fs_info->scrub_pause_wait);
}
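
/*
 * Added note (not part of the original file): pause/continue bracket work
 * that must not run concurrently with scrub, most notably transaction
 * commit. A caller pairs them like this:
 */
#if 0
	btrfs_scrub_pause(fs_info);	/* returns once every scrub is parked */
	/* ... work that must not race with scrub ... */
	btrfs_scrub_continue(fs_info);	/* lets the parked scrubs resume */
#endif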

int btrfs_scrub_cancel(struct btrfs_fs_info *fs_info)
{
	mutex_lock(&fs_info->scrub_lock);
	if (!atomic_read(&fs_info->scrubs_running)) {
		mutex_unlock(&fs_info->scrub_lock);
		return -ENOTCONN;
	}

	atomic_inc(&fs_info->scrub_cancel_req);
	while (atomic_read(&fs_info->scrubs_running)) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
			   atomic_read(&fs_info->scrubs_running) == 0);
		mutex_lock(&fs_info->scrub_lock);
	}
	atomic_dec(&fs_info->scrub_cancel_req);
	mutex_unlock(&fs_info->scrub_lock);

	return 0;
}

int btrfs_scrub_cancel_dev(struct btrfs_device *dev)
{
	struct btrfs_fs_info *fs_info = dev->fs_info;
	struct scrub_ctx *sctx;

	mutex_lock(&fs_info->scrub_lock);
	sctx = dev->scrub_ctx;
	if (!sctx) {
		mutex_unlock(&fs_info->scrub_lock);
		return -ENOTCONN;
	}
	atomic_inc(&sctx->cancel_req);
	while (dev->scrub_ctx) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
			   dev->scrub_ctx == NULL);
		mutex_lock(&fs_info->scrub_lock);
	}
	mutex_unlock(&fs_info->scrub_lock);

	return 0;
}
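
/*
 * Added note (not part of the original file): cancellation is cooperative.
 * Bumping sctx->cancel_req only asks the scrub to stop; the loop above then
 * sleeps on scrub_pause_wait until the scrub worker clears dev->scrub_ctx
 * on its way out. Dropping scrub_lock before each wait matters, since the
 * exiting scrub needs that lock to reset the pointer.
 */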

int btrfs_scrub_progress(struct btrfs_fs_info *fs_info, u64 devid,
			 struct btrfs_scrub_progress *progress)
{
	struct btrfs_dev_lookup_args args = { .devid = devid };
	struct btrfs_device *dev;
	struct scrub_ctx *sctx = NULL;

	mutex_lock(&fs_info->fs_devices->device_list_mutex);
	dev = btrfs_find_device(fs_info->fs_devices, &args);
	if (dev)
		sctx = dev->scrub_ctx;
	if (sctx)
		memcpy(progress, &sctx->stat, sizeof(*progress));
	mutex_unlock(&fs_info->fs_devices->device_list_mutex);

	return dev ? (sctx ? 0 : -ENOTCONN) : -ENODEV;
}
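
/*
 * Hedged usage sketch (added; not part of the original file): progress can
 * be polled while a scrub runs, e.g. from an ioctl handler:
 */
#if 0
	struct btrfs_scrub_progress progress;

	if (btrfs_scrub_progress(fs_info, devid, &progress) == 0)
		btrfs_info(fs_info, "devid %llu: %llu data bytes scrubbed",
			   devid, progress.data_bytes_scrubbed);
#endif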

static void scrub_remap_extent(struct btrfs_fs_info *fs_info,
			       u64 extent_logical, u32 extent_len,
			       u64 *extent_physical,
			       struct btrfs_device **extent_dev,
			       int *extent_mirror_num)
{
	u64 mapped_length;
	struct btrfs_io_context *bioc = NULL;
	int ret;

	mapped_length = extent_len;
	ret = btrfs_map_block(fs_info, BTRFS_MAP_READ, extent_logical,
			      &mapped_length, &bioc, 0);
	if (ret || !bioc || mapped_length < extent_len ||
	    !bioc->stripes[0].dev->bdev) {
		btrfs_put_bioc(bioc);
		return;
	}

	*extent_physical = bioc->stripes[0].physical;
	*extent_mirror_num = bioc->mirror_num;
	*extent_dev = bioc->stripes[0].dev;
	btrfs_put_bioc(bioc);
}
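
/*
 * Added note (not part of the original file): btrfs_map_block() translates
 * a logical address into per-device physical stripes; for a plain read it
 * returns at least one stripe, so stripes[0] above selects the primary
 * mirror. The returned btrfs_io_context is refcounted and must be released
 * with btrfs_put_bioc() on every path, including the error path.
 */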