// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2011, 2012 STRATO. All rights reserved.
 */

#include <linux/blkdev.h>
#include <linux/ratelimit.h>
#include <linux/sched/mm.h>
#include "ctree.h"
#include "volumes.h"
#include "disk-io.h"
#include "ordered-data.h"
#include "transaction.h"
#include "backref.h"
#include "extent_io.h"
#include "dev-replace.h"
#include "check-integrity.h"
#include "rcu-string.h"
#include "raid56.h"

/*
 * This is only the first step towards a full-featured scrub. It reads all
 * extents and super blocks and verifies the checksums. In case a bad checksum
 * is found or the extent cannot be read, good data will be written back if
 * any can be found.
 *
 * Future enhancements:
 *  - In case an unrepairable extent is encountered, track which files are
 *    affected and report them
 *  - track and record media errors, throw out bad devices
 *  - add a mode to also read unallocated space
 */

struct scrub_block;
struct scrub_ctx;

/*
 * The following three values only influence performance.
 * The last one configures the number of parallel and outstanding I/O
 * operations. The first two configure an upper limit for the number
 * of (dynamically allocated) pages that are added to a bio.
 */
#define SCRUB_PAGES_PER_RD_BIO	32	/* 128k per bio */
#define SCRUB_PAGES_PER_WR_BIO	32	/* 128k per bio */
#define SCRUB_BIOS_PER_SCTX	64	/* 8MB per device in flight */

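/*
 * Worked out explicitly (assuming 4KiB pages; PAGE_SIZE is architecture
 * dependent): 32 pages * 4KiB = 128KiB per bio, and 64 bios * 128KiB = 8MiB
 * of outstanding I/O per device, matching the two comments above.
 */
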
/*
 * The following value times PAGE_SIZE needs to be large enough to match the
 * largest node/leaf/sector size that shall be supported.
 * Values larger than BTRFS_STRIPE_LEN are not supported.
 */
#define SCRUB_MAX_PAGES_PER_BLOCK	16	/* 64k per node/leaf/sector */

struct scrub_recover {
	refcount_t		refs;
	struct btrfs_bio	*bbio;
	u64			map_length;
};

struct scrub_page {
	struct scrub_block	*sblock;
	struct page		*page;
	struct btrfs_device	*dev;
	struct list_head	list;
	u64			flags;  /* extent flags */
	u64			generation;
	u64			logical;
	u64			physical;
	u64			physical_for_dev_replace;
	atomic_t		refs;
	struct {
		unsigned int	mirror_num:8;
		unsigned int	have_csum:1;
		unsigned int	io_error:1;
	};
	u8			csum[BTRFS_CSUM_SIZE];

	struct scrub_recover	*recover;
};

struct scrub_bio {
	int			index;
	struct scrub_ctx	*sctx;
	struct btrfs_device	*dev;
	struct bio		*bio;
	blk_status_t		status;
	u64			logical;
	u64			physical;
#if SCRUB_PAGES_PER_WR_BIO >= SCRUB_PAGES_PER_RD_BIO
	struct scrub_page	*pagev[SCRUB_PAGES_PER_WR_BIO];
#else
	struct scrub_page	*pagev[SCRUB_PAGES_PER_RD_BIO];
#endif
	int			page_count;
	int			next_free;
	struct btrfs_work	work;
};

struct scrub_block {
	struct scrub_page	*pagev[SCRUB_MAX_PAGES_PER_BLOCK];
	int			page_count;
	atomic_t		outstanding_pages;
	refcount_t		refs; /* free mem on transition to zero */
	struct scrub_ctx	*sctx;
	struct scrub_parity	*sparity;
	struct {
		unsigned int	header_error:1;
		unsigned int	checksum_error:1;
		unsigned int	no_io_error_seen:1;
		unsigned int	generation_error:1; /* also sets header_error */

		/*
		 * The following is for the data used to check parity.
		 * It applies only to data that has a checksum.
		 */
		unsigned int	data_corrected:1;
	};
	struct btrfs_work	work;
};

/* Used for chunks that have a parity stripe, such as RAID5/6 */
struct scrub_parity {
	struct scrub_ctx	*sctx;

	struct btrfs_device	*scrub_dev;

	u64			logic_start;

	u64			logic_end;

	int			nsectors;

	u64			stripe_len;

	refcount_t		refs;

	struct list_head	spages;

	/* Work of parity check and repair */
	struct btrfs_work	work;

	/* Mark the parity blocks which have data */
	unsigned long		*dbitmap;

	/*
	 * Mark the parity blocks which have data, but where errors
	 * happened when reading or checking that data
	 */
	unsigned long		*ebitmap;

	unsigned long		bitmap[0];
};

struct scrub_ctx {
	struct scrub_bio	*bios[SCRUB_BIOS_PER_SCTX];
	struct btrfs_fs_info	*fs_info;
	int			first_free;
	int			curr;
	atomic_t		bios_in_flight;
	atomic_t		workers_pending;
	spinlock_t		list_lock;
	wait_queue_head_t	list_wait;
	u16			csum_size;
	struct list_head	csum_list;
	atomic_t		cancel_req;
	int			readonly;
	int			pages_per_rd_bio;

	int			is_dev_replace;

	struct scrub_bio	*wr_curr_bio;
	struct mutex		wr_lock;
	int			pages_per_wr_bio; /* <= SCRUB_PAGES_PER_WR_BIO */
	struct btrfs_device	*wr_tgtdev;
	bool			flush_all_writes;

	/*
	 * statistics
	 */
	struct btrfs_scrub_progress stat;
	spinlock_t		stat_lock;

	/*
	 * Use a ref counter to avoid use-after-free issues. Scrub workers
	 * decrement bios_in_flight and workers_pending and then do a wakeup
	 * on the list_wait wait queue. We must ensure the main scrub task
	 * doesn't free the scrub context before or while the workers are
	 * doing the wakeup() call.
	 */
	refcount_t		refs;
};

struct scrub_fixup_nodatasum {
	struct scrub_ctx	*sctx;
	struct btrfs_device	*dev;
	u64			logical;
	struct btrfs_root	*root;
	struct btrfs_work	work;
	int			mirror_num;
};

struct scrub_nocow_inode {
	u64			inum;
	u64			offset;
	u64			root;
	struct list_head	list;
};

struct scrub_copy_nocow_ctx {
	struct scrub_ctx	*sctx;
	u64			logical;
	u64			len;
	int			mirror_num;
	u64			physical_for_dev_replace;
	struct list_head	inodes;
	struct btrfs_work	work;
};

struct scrub_warning {
	struct btrfs_path	*path;
	u64			extent_item_size;
	const char		*errstr;
	u64			physical;
	u64			logical;
	struct btrfs_device	*dev;
};

struct full_stripe_lock {
	struct rb_node node;
	u64 logical;
	u64 refs;
	struct mutex mutex;
};

static void scrub_pending_bio_inc(struct scrub_ctx *sctx);
static void scrub_pending_bio_dec(struct scrub_ctx *sctx);
static void scrub_pending_trans_workers_inc(struct scrub_ctx *sctx);
static void scrub_pending_trans_workers_dec(struct scrub_ctx *sctx);
static int scrub_handle_errored_block(struct scrub_block *sblock_to_check);
static int scrub_setup_recheck_block(struct scrub_block *original_sblock,
				     struct scrub_block *sblocks_for_recheck);
static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
				struct scrub_block *sblock,
				int retry_failed_mirror);
static void scrub_recheck_block_checksum(struct scrub_block *sblock);
static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
					     struct scrub_block *sblock_good);
static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
					    struct scrub_block *sblock_good,
					    int page_num, int force_write);
static void scrub_write_block_to_dev_replace(struct scrub_block *sblock);
static int scrub_write_page_to_dev_replace(struct scrub_block *sblock,
					   int page_num);
static int scrub_checksum_data(struct scrub_block *sblock);
static int scrub_checksum_tree_block(struct scrub_block *sblock);
static int scrub_checksum_super(struct scrub_block *sblock);
static void scrub_block_get(struct scrub_block *sblock);
static void scrub_block_put(struct scrub_block *sblock);
static void scrub_page_get(struct scrub_page *spage);
static void scrub_page_put(struct scrub_page *spage);
static void scrub_parity_get(struct scrub_parity *sparity);
static void scrub_parity_put(struct scrub_parity *sparity);
static int scrub_add_page_to_rd_bio(struct scrub_ctx *sctx,
				    struct scrub_page *spage);
static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
		       u64 physical, struct btrfs_device *dev, u64 flags,
		       u64 gen, int mirror_num, u8 *csum, int force,
		       u64 physical_for_dev_replace);
static void scrub_bio_end_io(struct bio *bio);
static void scrub_bio_end_io_worker(struct btrfs_work *work);
static void scrub_block_complete(struct scrub_block *sblock);
static void scrub_remap_extent(struct btrfs_fs_info *fs_info,
			       u64 extent_logical, u64 extent_len,
			       u64 *extent_physical,
			       struct btrfs_device **extent_dev,
			       int *extent_mirror_num);
static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
				    struct scrub_page *spage);
static void scrub_wr_submit(struct scrub_ctx *sctx);
static void scrub_wr_bio_end_io(struct bio *bio);
static void scrub_wr_bio_end_io_worker(struct btrfs_work *work);
static int write_page_nocow(struct scrub_ctx *sctx,
			    u64 physical_for_dev_replace, struct page *page);
static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root,
				      struct scrub_copy_nocow_ctx *ctx);
static int copy_nocow_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
			    int mirror_num, u64 physical_for_dev_replace);
static void copy_nocow_pages_worker(struct btrfs_work *work);
static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info);
static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info);
static void scrub_put_ctx(struct scrub_ctx *sctx);

static inline int scrub_is_page_on_raid56(struct scrub_page *page)
{
	return page->recover &&
	       (page->recover->bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK);
}

static void scrub_pending_bio_inc(struct scrub_ctx *sctx)
{
	refcount_inc(&sctx->refs);
	atomic_inc(&sctx->bios_in_flight);
}

static void scrub_pending_bio_dec(struct scrub_ctx *sctx)
{
	atomic_dec(&sctx->bios_in_flight);
	wake_up(&sctx->list_wait);
	scrub_put_ctx(sctx);
}

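/*
 * Note on the pair above: each scrub_pending_bio_inc() takes an extra
 * reference on the scrub context, and the matching scrub_pending_bio_dec()
 * only drops it via scrub_put_ctx() after the wakeup, so the context cannot
 * be freed out from under a bio that is still completing.
 */
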
static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
{
	while (atomic_read(&fs_info->scrub_pause_req)) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
			   atomic_read(&fs_info->scrub_pause_req) == 0);
		mutex_lock(&fs_info->scrub_lock);
	}
}

static void scrub_pause_on(struct btrfs_fs_info *fs_info)
{
	atomic_inc(&fs_info->scrubs_paused);
	wake_up(&fs_info->scrub_pause_wait);
}

static void scrub_pause_off(struct btrfs_fs_info *fs_info)
{
	mutex_lock(&fs_info->scrub_lock);
	__scrub_blocked_if_needed(fs_info);
	atomic_dec(&fs_info->scrubs_paused);
	mutex_unlock(&fs_info->scrub_lock);

	wake_up(&fs_info->scrub_pause_wait);
}

static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
{
	scrub_pause_on(fs_info);
	scrub_pause_off(fs_info);
}

/*
 * Insert new full stripe lock into full stripe locks tree
 *
 * Return pointer to existing or newly inserted full_stripe_lock structure if
 * everything works well.
 * Return ERR_PTR(-ENOMEM) if we failed to allocate memory
 *
 * NOTE: caller must hold full_stripe_locks_root->lock before calling this
 * function
 */
static struct full_stripe_lock *insert_full_stripe_lock(
		struct btrfs_full_stripe_locks_tree *locks_root,
		u64 fstripe_logical)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct full_stripe_lock *entry;
	struct full_stripe_lock *ret;

	lockdep_assert_held(&locks_root->lock);

	p = &locks_root->root.rb_node;
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct full_stripe_lock, node);
		if (fstripe_logical < entry->logical) {
			p = &(*p)->rb_left;
		} else if (fstripe_logical > entry->logical) {
			p = &(*p)->rb_right;
		} else {
			entry->refs++;
			return entry;
		}
	}

	/* Insert new lock */
	ret = kmalloc(sizeof(*ret), GFP_KERNEL);
	if (!ret)
		return ERR_PTR(-ENOMEM);
	ret->logical = fstripe_logical;
	ret->refs = 1;
	mutex_init(&ret->mutex);

	rb_link_node(&ret->node, parent, p);
	rb_insert_color(&ret->node, &locks_root->root);
	return ret;
}

/*
 * Search for a full stripe lock of a block group
 *
 * Return pointer to existing full stripe lock if found
 * Return NULL if not found
 */
static struct full_stripe_lock *search_full_stripe_lock(
		struct btrfs_full_stripe_locks_tree *locks_root,
		u64 fstripe_logical)
{
	struct rb_node *node;
	struct full_stripe_lock *entry;

	lockdep_assert_held(&locks_root->lock);

	node = locks_root->root.rb_node;
	while (node) {
		entry = rb_entry(node, struct full_stripe_lock, node);
		if (fstripe_logical < entry->logical)
			node = node->rb_left;
		else if (fstripe_logical > entry->logical)
			node = node->rb_right;
		else
			return entry;
	}
	return NULL;
}

/*
 * Helper to get full stripe logical from a normal bytenr.
 *
 * Caller must ensure @cache is a RAID56 block group.
 */
static u64 get_full_stripe_logical(struct btrfs_block_group_cache *cache,
				   u64 bytenr)
{
	u64 ret;

	/*
	 * Due to chunk item size limit, full stripe length should not be
	 * larger than U32_MAX. Just a sanity check here.
	 */
	WARN_ON_ONCE(cache->full_stripe_len >= U32_MAX);

	/*
	 * round_down() can only handle power of 2, while RAID56 full
	 * stripe length can be 64KiB * n, so we need to manually round down.
	 */
	ret = div64_u64(bytenr - cache->key.objectid, cache->full_stripe_len) *
	      cache->full_stripe_len + cache->key.objectid;
	return ret;
}

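/*
 * Worked example for the round down above (illustrative numbers only):
 * with cache->key.objectid = 1GiB and full_stripe_len = 192KiB (three 64KiB
 * data stripes), bytenr = 1GiB + 500KiB lies in full stripe number
 * div64_u64(500KiB, 192KiB) = 2, so the returned full stripe start is
 * 1GiB + 2 * 192KiB = 1GiB + 384KiB.
 */
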
/*
 * Lock a full stripe to avoid concurrency of recovery and read
 *
 * It's only used for profiles with parities (RAID5/6), for other profiles it
 * does nothing.
 *
 * Return 0 if we locked the full stripe covering @bytenr, with a mutex held.
 * The caller must call unlock_full_stripe() in the same context.
 *
 * Return <0 on error.
 */
static int lock_full_stripe(struct btrfs_fs_info *fs_info, u64 bytenr,
			    bool *locked_ret)
{
	struct btrfs_block_group_cache *bg_cache;
	struct btrfs_full_stripe_locks_tree *locks_root;
	struct full_stripe_lock *existing;
	u64 fstripe_start;
	int ret = 0;

	*locked_ret = false;
	bg_cache = btrfs_lookup_block_group(fs_info, bytenr);
	if (!bg_cache) {
		ASSERT(0);
		return -ENOENT;
	}

	/* Profiles not based on parity don't need full stripe lock */
	if (!(bg_cache->flags & BTRFS_BLOCK_GROUP_RAID56_MASK))
		goto out;
	locks_root = &bg_cache->full_stripe_locks_root;

	fstripe_start = get_full_stripe_logical(bg_cache, bytenr);

	/* Now insert the full stripe lock */
	mutex_lock(&locks_root->lock);
	existing = insert_full_stripe_lock(locks_root, fstripe_start);
	mutex_unlock(&locks_root->lock);
	if (IS_ERR(existing)) {
		ret = PTR_ERR(existing);
		goto out;
	}
	mutex_lock(&existing->mutex);
	*locked_ret = true;
out:
	btrfs_put_block_group(bg_cache);
	return ret;
}

/*
 * Unlock a full stripe.
 *
 * NOTE: The caller must call this from the same context as the corresponding
 * lock_full_stripe().
 *
 * Return 0 if we unlocked the full stripe without problems.
 * Return <0 on error.
 */
static int unlock_full_stripe(struct btrfs_fs_info *fs_info, u64 bytenr,
			      bool locked)
{
	struct btrfs_block_group_cache *bg_cache;
	struct btrfs_full_stripe_locks_tree *locks_root;
	struct full_stripe_lock *fstripe_lock;
	u64 fstripe_start;
	bool freeit = false;
	int ret = 0;

	/* If we didn't acquire full stripe lock, no need to continue */
	if (!locked)
		return 0;

	bg_cache = btrfs_lookup_block_group(fs_info, bytenr);
	if (!bg_cache) {
		ASSERT(0);
		return -ENOENT;
	}
	if (!(bg_cache->flags & BTRFS_BLOCK_GROUP_RAID56_MASK))
		goto out;

	locks_root = &bg_cache->full_stripe_locks_root;
	fstripe_start = get_full_stripe_logical(bg_cache, bytenr);

	mutex_lock(&locks_root->lock);
	fstripe_lock = search_full_stripe_lock(locks_root, fstripe_start);
	/* Unpaired unlock_full_stripe() detected */
	if (!fstripe_lock) {
		WARN_ON(1);
		ret = -ENOENT;
		mutex_unlock(&locks_root->lock);
		goto out;
	}

	if (fstripe_lock->refs == 0) {
		WARN_ON(1);
		btrfs_warn(fs_info, "full stripe lock at %llu refcount underflow",
			fstripe_lock->logical);
	} else {
		fstripe_lock->refs--;
	}

	if (fstripe_lock->refs == 0) {
		rb_erase(&fstripe_lock->node, &locks_root->root);
		freeit = true;
	}
	mutex_unlock(&locks_root->lock);

	mutex_unlock(&fstripe_lock->mutex);
	if (freeit)
		kfree(fstripe_lock);
out:
	btrfs_put_block_group(bg_cache);
	return ret;
}

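/*
 * Illustrative pairing of the two helpers above (a sketch, not a verbatim
 * call site from this excerpt):
 *
 *	bool locked;
 *	int ret;
 *
 *	ret = lock_full_stripe(fs_info, logical, &locked);
 *	if (ret < 0)
 *		return ret;
 *	// ... recheck/repair blocks inside the full stripe ...
 *	unlock_full_stripe(fs_info, logical, locked);
 *
 * Passing @locked back unchanged makes unlock_full_stripe() a no-op for
 * non-RAID56 profiles, where lock_full_stripe() never took the mutex.
 */
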
/*
 * used for workers that require transaction commits (i.e., for the
 * NOCOW case)
 */
static void scrub_pending_trans_workers_inc(struct scrub_ctx *sctx)
{
	struct btrfs_fs_info *fs_info = sctx->fs_info;

	refcount_inc(&sctx->refs);
	/*
	 * increment scrubs_running to prevent cancel requests from
	 * completing as long as a worker is running. we must also
	 * increment scrubs_paused to prevent deadlocking on pause
	 * requests used for transaction commits (as the worker uses a
	 * transaction context). it is safe to regard the worker
	 * as paused for all practical matters. effectively, we only
	 * prevent cancellation requests from completing.
	 */
	mutex_lock(&fs_info->scrub_lock);
	atomic_inc(&fs_info->scrubs_running);
	atomic_inc(&fs_info->scrubs_paused);
	mutex_unlock(&fs_info->scrub_lock);

	/*
	 * The @scrubs_running == @scrubs_paused check inside wait_event()
	 * is not an atomic operation, which means we may inc/dec
	 * @scrubs_running/@scrubs_paused at any time. Wake up
	 * @scrub_pause_wait as often as we can so that a blocked
	 * transaction commit waits as little as possible.
	 */
	wake_up(&fs_info->scrub_pause_wait);

	atomic_inc(&sctx->workers_pending);
}

/* used for workers that require transaction commits */
static void scrub_pending_trans_workers_dec(struct scrub_ctx *sctx)
{
	struct btrfs_fs_info *fs_info = sctx->fs_info;

	/*
	 * see scrub_pending_trans_workers_inc() for why we're pretending
	 * to be paused in the scrub counters
	 */
	mutex_lock(&fs_info->scrub_lock);
	atomic_dec(&fs_info->scrubs_running);
	atomic_dec(&fs_info->scrubs_paused);
	mutex_unlock(&fs_info->scrub_lock);
	atomic_dec(&sctx->workers_pending);
	wake_up(&fs_info->scrub_pause_wait);
	wake_up(&sctx->list_wait);
	scrub_put_ctx(sctx);
}

static void scrub_free_csums(struct scrub_ctx *sctx)
{
	while (!list_empty(&sctx->csum_list)) {
		struct btrfs_ordered_sum *sum;
		sum = list_first_entry(&sctx->csum_list,
				       struct btrfs_ordered_sum, list);
		list_del(&sum->list);
		kfree(sum);
	}
}

static noinline_for_stack void scrub_free_ctx(struct scrub_ctx *sctx)
{
	int i;

	if (!sctx)
		return;

	/* this can happen when scrub is cancelled */
	if (sctx->curr != -1) {
		struct scrub_bio *sbio = sctx->bios[sctx->curr];

		for (i = 0; i < sbio->page_count; i++) {
			WARN_ON(!sbio->pagev[i]->page);
			scrub_block_put(sbio->pagev[i]->sblock);
		}
		bio_put(sbio->bio);
	}

	for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) {
		struct scrub_bio *sbio = sctx->bios[i];

		if (!sbio)
			break;
		kfree(sbio);
	}

	kfree(sctx->wr_curr_bio);
	scrub_free_csums(sctx);
	kfree(sctx);
}

static void scrub_put_ctx(struct scrub_ctx *sctx)
{
	if (refcount_dec_and_test(&sctx->refs))
		scrub_free_ctx(sctx);
}

static noinline_for_stack
struct scrub_ctx *scrub_setup_ctx(struct btrfs_device *dev, int is_dev_replace)
{
	struct scrub_ctx *sctx;
	int i;
	struct btrfs_fs_info *fs_info = dev->fs_info;

	sctx = kzalloc(sizeof(*sctx), GFP_KERNEL);
	if (!sctx)
		goto nomem;
	refcount_set(&sctx->refs, 1);
	sctx->is_dev_replace = is_dev_replace;
	sctx->pages_per_rd_bio = SCRUB_PAGES_PER_RD_BIO;
	sctx->curr = -1;
	sctx->fs_info = dev->fs_info;
	for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) {
		struct scrub_bio *sbio;

		sbio = kzalloc(sizeof(*sbio), GFP_KERNEL);
		if (!sbio)
			goto nomem;
		sctx->bios[i] = sbio;

		sbio->index = i;
		sbio->sctx = sctx;
		sbio->page_count = 0;
		btrfs_init_work(&sbio->work, btrfs_scrub_helper,
				scrub_bio_end_io_worker, NULL, NULL);

		if (i != SCRUB_BIOS_PER_SCTX - 1)
			sctx->bios[i]->next_free = i + 1;
		else
			sctx->bios[i]->next_free = -1;
	}
	sctx->first_free = 0;
	atomic_set(&sctx->bios_in_flight, 0);
	atomic_set(&sctx->workers_pending, 0);
	atomic_set(&sctx->cancel_req, 0);
	sctx->csum_size = btrfs_super_csum_size(fs_info->super_copy);
	INIT_LIST_HEAD(&sctx->csum_list);

	spin_lock_init(&sctx->list_lock);
	spin_lock_init(&sctx->stat_lock);
	init_waitqueue_head(&sctx->list_wait);

	WARN_ON(sctx->wr_curr_bio != NULL);
	mutex_init(&sctx->wr_lock);
	sctx->wr_curr_bio = NULL;
	if (is_dev_replace) {
		WARN_ON(!fs_info->dev_replace.tgtdev);
		sctx->pages_per_wr_bio = SCRUB_PAGES_PER_WR_BIO;
		sctx->wr_tgtdev = fs_info->dev_replace.tgtdev;
		sctx->flush_all_writes = false;
	}

	return sctx;

nomem:
	scrub_free_ctx(sctx);
	return ERR_PTR(-ENOMEM);
}

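/*
 * Lifetime sketch for the context (illustrative; the actual caller sits
 * outside this excerpt):
 *
 *	struct scrub_ctx *sctx;
 *
 *	sctx = scrub_setup_ctx(dev, 0);
 *	if (IS_ERR(sctx))
 *		return PTR_ERR(sctx);
 *	// ... submit scrub I/O; each in-flight bio holds an extra ref ...
 *	scrub_put_ctx(sctx);	// drops the initial reference
 *
 * The context is freed only after every worker and bio has dropped its
 * reference, per the refs comment in struct scrub_ctx.
 */
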
static int scrub_print_warning_inode(u64 inum, u64 offset, u64 root,
				     void *warn_ctx)
{
	u64 isize;
	u32 nlink;
	int ret;
	int i;
	unsigned nofs_flag;
	struct extent_buffer *eb;
	struct btrfs_inode_item *inode_item;
	struct scrub_warning *swarn = warn_ctx;
	struct btrfs_fs_info *fs_info = swarn->dev->fs_info;
	struct inode_fs_paths *ipath = NULL;
	struct btrfs_root *local_root;
	struct btrfs_key root_key;
	struct btrfs_key key;

	root_key.objectid = root;
	root_key.type = BTRFS_ROOT_ITEM_KEY;
	root_key.offset = (u64)-1;
	local_root = btrfs_read_fs_root_no_name(fs_info, &root_key);
	if (IS_ERR(local_root)) {
		ret = PTR_ERR(local_root);
		goto err;
	}

	/*
	 * this makes the path point to (inum INODE_ITEM ioff)
	 */
	key.objectid = inum;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;

	ret = btrfs_search_slot(NULL, local_root, &key, swarn->path, 0, 0);
	if (ret) {
		btrfs_release_path(swarn->path);
		goto err;
	}

	eb = swarn->path->nodes[0];
	inode_item = btrfs_item_ptr(eb, swarn->path->slots[0],
					struct btrfs_inode_item);
	isize = btrfs_inode_size(eb, inode_item);
	nlink = btrfs_inode_nlink(eb, inode_item);
	btrfs_release_path(swarn->path);

	/*
	 * init_path might indirectly call vmalloc, or use GFP_KERNEL. Scrub
	 * uses GFP_NOFS in this context, so we keep it consistent but it does
	 * not seem to be strictly necessary.
	 */
	nofs_flag = memalloc_nofs_save();
	ipath = init_ipath(4096, local_root, swarn->path);
	memalloc_nofs_restore(nofs_flag);
	if (IS_ERR(ipath)) {
		ret = PTR_ERR(ipath);
		ipath = NULL;
		goto err;
	}
	ret = paths_from_inode(inum, ipath);

	if (ret < 0)
		goto err;

	/*
	 * we deliberately ignore the fact that ipath might have been too
	 * small to hold all of the paths here
	 */
	for (i = 0; i < ipath->fspath->elem_cnt; ++i)
		btrfs_warn_in_rcu(fs_info,
"%s at logical %llu on dev %s, physical %llu, root %llu, inode %llu, offset %llu, length %llu, links %u (path: %s)",
				  swarn->errstr, swarn->logical,
				  rcu_str_deref(swarn->dev->name),
				  swarn->physical,
				  root, inum, offset,
				  min(isize - offset, (u64)PAGE_SIZE), nlink,
				  (char *)(unsigned long)ipath->fspath->val[i]);

	free_ipath(ipath);
	return 0;

err:
	btrfs_warn_in_rcu(fs_info,
			  "%s at logical %llu on dev %s, physical %llu, root %llu, inode %llu, offset %llu: path resolving failed with ret=%d",
			  swarn->errstr, swarn->logical,
			  rcu_str_deref(swarn->dev->name),
			  swarn->physical,
			  root, inum, offset, ret);

	free_ipath(ipath);
	return 0;
}

static void scrub_print_warning(const char *errstr, struct scrub_block *sblock)
{
	struct btrfs_device *dev;
	struct btrfs_fs_info *fs_info;
	struct btrfs_path *path;
	struct btrfs_key found_key;
	struct extent_buffer *eb;
	struct btrfs_extent_item *ei;
	struct scrub_warning swarn;
	unsigned long ptr = 0;
	u64 extent_item_pos;
	u64 flags = 0;
	u64 ref_root;
	u32 item_size;
	u8 ref_level = 0;
	int ret;

	WARN_ON(sblock->page_count < 1);
	dev = sblock->pagev[0]->dev;
	fs_info = sblock->sctx->fs_info;

	path = btrfs_alloc_path();
	if (!path)
		return;

	swarn.physical = sblock->pagev[0]->physical;
	swarn.logical = sblock->pagev[0]->logical;
	swarn.errstr = errstr;
	swarn.dev = NULL;

	ret = extent_from_logical(fs_info, swarn.logical, path, &found_key,
				  &flags);
	if (ret < 0)
		goto out;

	extent_item_pos = swarn.logical - found_key.objectid;
	swarn.extent_item_size = found_key.offset;

	eb = path->nodes[0];
	ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
	item_size = btrfs_item_size_nr(eb, path->slots[0]);

	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		do {
			ret = tree_backref_for_extent(&ptr, eb, &found_key, ei,
						      item_size, &ref_root,
						      &ref_level);
			btrfs_warn_in_rcu(fs_info,
"%s at logical %llu on dev %s, physical %llu: metadata %s (level %d) in tree %llu",
				errstr, swarn.logical,
				rcu_str_deref(dev->name),
				swarn.physical,
				ref_level ? "node" : "leaf",
				ret < 0 ? -1 : ref_level,
				ret < 0 ? -1 : ref_root);
		} while (ret != 1);
		btrfs_release_path(path);
	} else {
		btrfs_release_path(path);
		swarn.path = path;
		swarn.dev = dev;
		iterate_extent_inodes(fs_info, found_key.objectid,
				      extent_item_pos, 1,
				      scrub_print_warning_inode, &swarn, false);
	}

out:
	btrfs_free_path(path);
}

static int scrub_fixup_readpage(u64 inum, u64 offset, u64 root, void *fixup_ctx)
{
	struct page *page = NULL;
	unsigned long index;
	struct scrub_fixup_nodatasum *fixup = fixup_ctx;
	int ret;
	int corrected = 0;
	struct btrfs_key key;
	struct inode *inode = NULL;
	struct btrfs_fs_info *fs_info;
	u64 end = offset + PAGE_SIZE - 1;
	struct btrfs_root *local_root;
	int srcu_index;

	key.objectid = root;
	key.type = BTRFS_ROOT_ITEM_KEY;
	key.offset = (u64)-1;

	fs_info = fixup->root->fs_info;
	srcu_index = srcu_read_lock(&fs_info->subvol_srcu);

	local_root = btrfs_read_fs_root_no_name(fs_info, &key);
	if (IS_ERR(local_root)) {
		srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
		return PTR_ERR(local_root);
	}

	key.type = BTRFS_INODE_ITEM_KEY;
	key.objectid = inum;
	key.offset = 0;
	inode = btrfs_iget(fs_info->sb, &key, local_root, NULL);
	srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	index = offset >> PAGE_SHIFT;

	page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
	if (!page) {
		ret = -ENOMEM;
		goto out;
	}

	if (PageUptodate(page)) {
		if (PageDirty(page)) {
			/*
			 * we need to write the data to the defect sector. the
			 * data that was in that sector is not in memory,
			 * because the page was modified. we must not write the
			 * modified page to that sector.
			 *
			 * TODO: what could be done here: wait for the delalloc
			 *       runner to write out that page (might involve
			 *       COW) and see whether the sector is still
			 *       referenced afterwards.
			 *
			 * For the meantime, we'll treat this error as
			 * uncorrectable, although there is a chance that a
			 * later scrub will find the bad sector again when
			 * there's no dirty page in memory.
			 */
			ret = -EIO;
			goto out;
		}
		ret = repair_io_failure(fs_info, inum, offset, PAGE_SIZE,
					fixup->logical, page,
					offset - page_offset(page),
					fixup->mirror_num);
		unlock_page(page);
		corrected = !ret;
	} else {
		/*
		 * we need to get good data first. the general readpage path
		 * will call repair_io_failure for us, we just have to make
		 * sure we read the bad mirror.
		 */
		ret = set_extent_bits(&BTRFS_I(inode)->io_tree, offset, end,
				      EXTENT_DAMAGED);
		if (ret) {
			/* set_extent_bits should give proper error */
			WARN_ON(ret > 0);
			if (ret > 0)
				ret = -EFAULT;
			goto out;
		}

		ret = extent_read_full_page(&BTRFS_I(inode)->io_tree, page,
						btrfs_get_extent,
						fixup->mirror_num);
		wait_on_page_locked(page);

		corrected = !test_range_bit(&BTRFS_I(inode)->io_tree, offset,
						end, EXTENT_DAMAGED, 0, NULL);
		if (!corrected)
			clear_extent_bits(&BTRFS_I(inode)->io_tree, offset, end,
						EXTENT_DAMAGED);
	}

out:
	if (page)
		put_page(page);

	iput(inode);

	if (ret < 0)
		return ret;

	if (ret == 0 && corrected) {
		/*
		 * we only need to call readpage for one of the inodes
		 * belonging to this extent, so make iterate_extent_inodes stop
		 */
		return 1;
	}

	return -EIO;
}

static void scrub_fixup_nodatasum(struct btrfs_work *work)
{
	struct btrfs_fs_info *fs_info;
	int ret;
	struct scrub_fixup_nodatasum *fixup;
	struct scrub_ctx *sctx;
	struct btrfs_trans_handle *trans = NULL;
	struct btrfs_path *path;
	int uncorrectable = 0;

	fixup = container_of(work, struct scrub_fixup_nodatasum, work);
	sctx = fixup->sctx;
	fs_info = fixup->root->fs_info;

	path = btrfs_alloc_path();
	if (!path) {
		spin_lock(&sctx->stat_lock);
		++sctx->stat.malloc_errors;
		spin_unlock(&sctx->stat_lock);
		uncorrectable = 1;
		goto out;
	}

	trans = btrfs_join_transaction(fixup->root);
	if (IS_ERR(trans)) {
		uncorrectable = 1;
		goto out;
	}

	/*
	 * the idea is to trigger a regular read through the standard path. we
	 * read a page from the (failed) logical address by specifying the
	 * corresponding copynum of the failed sector. thus, that readpage is
	 * expected to fail.
	 * that is the point where on-the-fly error correction will kick in
	 * (once it's finished) and rewrite the failed sector if a good copy
	 * can be found.
	 */
	ret = iterate_inodes_from_logical(fixup->logical, fs_info, path,
					  scrub_fixup_readpage, fixup, false);
	if (ret < 0) {
		uncorrectable = 1;
		goto out;
	}
	WARN_ON(ret != 1);

	spin_lock(&sctx->stat_lock);
	++sctx->stat.corrected_errors;
	spin_unlock(&sctx->stat_lock);

out:
	if (trans && !IS_ERR(trans))
		btrfs_end_transaction(trans);
	if (uncorrectable) {
		spin_lock(&sctx->stat_lock);
		++sctx->stat.uncorrectable_errors;
		spin_unlock(&sctx->stat_lock);
		btrfs_dev_replace_stats_inc(
			&fs_info->dev_replace.num_uncorrectable_read_errors);
		btrfs_err_rl_in_rcu(fs_info,
			"unable to fixup (nodatasum) error at logical %llu on dev %s",
			fixup->logical, rcu_str_deref(fixup->dev->name));
	}

	btrfs_free_path(path);
	kfree(fixup);

	scrub_pending_trans_workers_dec(sctx);
}

static inline void scrub_get_recover(struct scrub_recover *recover)
{
	refcount_inc(&recover->refs);
}

static inline void scrub_put_recover(struct btrfs_fs_info *fs_info,
				     struct scrub_recover *recover)
{
	if (refcount_dec_and_test(&recover->refs)) {
		btrfs_bio_counter_dec(fs_info);
		btrfs_put_bbio(recover->bbio);
		kfree(recover);
	}
}

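/*
 * Note on the pairing above: the final scrub_put_recover() both releases
 * the cached bbio and drops one fs-wide bio counter reference; the matching
 * btrfs_bio_counter increment is presumably taken where the recover struct
 * and its bbio are set up (outside this excerpt).
 */
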
/*
 * scrub_handle_errored_block gets called when either verification of the
 * pages failed or the bio failed to read, e.g. with EIO. In the latter
 * case, this function handles all pages in the bio, even though only one
 * may be bad.
 * The goal of this function is to repair the errored block by using the
 * contents of one of the mirrors.
 */
static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
{
	struct scrub_ctx *sctx = sblock_to_check->sctx;
	struct btrfs_device *dev;
	struct btrfs_fs_info *fs_info;
	u64 logical;
	unsigned int failed_mirror_index;
	unsigned int is_metadata;
	unsigned int have_csum;
	struct scrub_block *sblocks_for_recheck; /* holds one for each mirror */
	struct scrub_block *sblock_bad;
	int ret;
	int mirror_index;
	int page_num;
	int success;
	bool full_stripe_locked;
	static DEFINE_RATELIMIT_STATE(_rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);

	BUG_ON(sblock_to_check->page_count < 1);
	fs_info = sctx->fs_info;
	if (sblock_to_check->pagev[0]->flags & BTRFS_EXTENT_FLAG_SUPER) {
		/*
		 * If we find an error in a super block, we just report it.
		 * It will get written with the next transaction commit
		 * anyway.
		 */
		spin_lock(&sctx->stat_lock);
		++sctx->stat.super_errors;
		spin_unlock(&sctx->stat_lock);
		return 0;
	}
	logical = sblock_to_check->pagev[0]->logical;
	BUG_ON(sblock_to_check->pagev[0]->mirror_num < 1);
	failed_mirror_index = sblock_to_check->pagev[0]->mirror_num - 1;
	is_metadata = !(sblock_to_check->pagev[0]->flags &
			BTRFS_EXTENT_FLAG_DATA);
	have_csum = sblock_to_check->pagev[0]->have_csum;
	dev = sblock_to_check->pagev[0]->dev;

	/*
	 * For RAID5/6, a race can happen with the scrub thread of a
	 * different device. On data corruption, the parity and data
	 * threads will both try to recover the data.
	 * The race can lead to a doubly added csum error, or even an
	 * unrecoverable error.
	 */
	ret = lock_full_stripe(fs_info, logical, &full_stripe_locked);
	if (ret < 0) {
		spin_lock(&sctx->stat_lock);
		if (ret == -ENOMEM)
			sctx->stat.malloc_errors++;
		sctx->stat.read_errors++;
		sctx->stat.uncorrectable_errors++;
		spin_unlock(&sctx->stat_lock);
		return ret;
	}

	if (sctx->is_dev_replace && !is_metadata && !have_csum) {
		sblocks_for_recheck = NULL;
		goto nodatasum_case;
	}

	/*
	 * Read all mirrors one after the other. This includes re-reading
	 * the extent or metadata block that failed (that was the reason
	 * this fixup code was called), this time page by page, in order
	 * to know which pages caused I/O errors and which ones are good
	 * (for all mirrors).
	 * The goal is to handle the situation when more than one mirror
	 * contains I/O errors, but the errors do not overlap, i.e. the
	 * data can be repaired by selecting the pages from those mirrors
	 * without I/O error on the particular pages. One example (with
	 * blocks >= 2 * PAGE_SIZE) would be that mirror #1 has an I/O
	 * error on the first page, the second page is good, and mirror #2
	 * has an I/O error on the second page, but the first page is good.
	 * Then the first page of the first mirror can be repaired by
	 * taking the first page of the second mirror, and the second page
	 * of the second mirror can be repaired by copying the contents of
	 * the 2nd page of the 1st mirror.
	 * One more note: if the pages of one mirror contain I/O errors,
	 * the checksum cannot be verified. In order to get the best data
	 * for repairing, the first attempt is to find a mirror without
	 * I/O errors and with a validated checksum. Only if this is not
	 * possible are the pages picked from mirrors with I/O errors
	 * without considering the checksum.
	 * If the latter is the case, at the end the checksum of the
	 * repaired area is verified in order to correctly maintain the
	 * statistics.
	 */

	sblocks_for_recheck = kcalloc(BTRFS_MAX_MIRRORS,
				      sizeof(*sblocks_for_recheck), GFP_NOFS);
	if (!sblocks_for_recheck) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.malloc_errors++;
		sctx->stat.read_errors++;
		sctx->stat.uncorrectable_errors++;
		spin_unlock(&sctx->stat_lock);
		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
		goto out;
	}

1200 /* setup the context, map the logical blocks and alloc the pages */
Zhao Leibe50a8d2015-01-20 15:11:42 +08001201 ret = scrub_setup_recheck_block(sblock_to_check, sblocks_for_recheck);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001202 if (ret) {
Stefan Behrensd9d181c2012-11-02 09:58:09 +01001203 spin_lock(&sctx->stat_lock);
1204 sctx->stat.read_errors++;
1205 sctx->stat.uncorrectable_errors++;
1206 spin_unlock(&sctx->stat_lock);
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01001207 btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001208 goto out;
1209 }
1210 BUG_ON(failed_mirror_index >= BTRFS_MAX_MIRRORS);
1211 sblock_bad = sblocks_for_recheck + failed_mirror_index;
1212
1213 /* build and submit the bios for the failed mirror, check checksums */
Zhao Leiaffe4a52015-08-24 21:32:06 +08001214 scrub_recheck_block(fs_info, sblock_bad, 1);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001215
1216 if (!sblock_bad->header_error && !sblock_bad->checksum_error &&
1217 sblock_bad->no_io_error_seen) {
1218 /*
1219 * the error disappeared after reading page by page, or
1220 * the area was part of a huge bio and other parts of the
1221 * bio caused I/O errors, or the block layer merged several
1222 * read requests into one and the error is caused by a
1223 * different bio (usually one of the two latter cases is
1224 * the cause)
1225 */
Stefan Behrensd9d181c2012-11-02 09:58:09 +01001226 spin_lock(&sctx->stat_lock);
1227 sctx->stat.unverified_errors++;
Miao Xie5a6ac9e2014-11-06 17:20:58 +08001228 sblock_to_check->data_corrected = 1;
Stefan Behrensd9d181c2012-11-02 09:58:09 +01001229 spin_unlock(&sctx->stat_lock);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001230
Stefan Behrensff023aa2012-11-06 11:43:11 +01001231 if (sctx->is_dev_replace)
1232 scrub_write_block_to_dev_replace(sblock_bad);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001233 goto out;
1234 }
1235
1236 if (!sblock_bad->no_io_error_seen) {
Stefan Behrensd9d181c2012-11-02 09:58:09 +01001237 spin_lock(&sctx->stat_lock);
1238 sctx->stat.read_errors++;
1239 spin_unlock(&sctx->stat_lock);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001240 if (__ratelimit(&_rs))
1241 scrub_print_warning("i/o error", sblock_to_check);
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01001242 btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001243 } else if (sblock_bad->checksum_error) {
Stefan Behrensd9d181c2012-11-02 09:58:09 +01001244 spin_lock(&sctx->stat_lock);
1245 sctx->stat.csum_errors++;
1246 spin_unlock(&sctx->stat_lock);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001247 if (__ratelimit(&_rs))
1248 scrub_print_warning("checksum error", sblock_to_check);
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01001249 btrfs_dev_stat_inc_and_print(dev,
Stefan Behrens442a4f62012-05-25 16:06:08 +02001250 BTRFS_DEV_STAT_CORRUPTION_ERRS);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001251 } else if (sblock_bad->header_error) {
Stefan Behrensd9d181c2012-11-02 09:58:09 +01001252 spin_lock(&sctx->stat_lock);
1253 sctx->stat.verify_errors++;
1254 spin_unlock(&sctx->stat_lock);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001255 if (__ratelimit(&_rs))
1256 scrub_print_warning("checksum/header error",
1257 sblock_to_check);
Stefan Behrens442a4f62012-05-25 16:06:08 +02001258 if (sblock_bad->generation_error)
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01001259 btrfs_dev_stat_inc_and_print(dev,
Stefan Behrens442a4f62012-05-25 16:06:08 +02001260 BTRFS_DEV_STAT_GENERATION_ERRS);
1261 else
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01001262 btrfs_dev_stat_inc_and_print(dev,
Stefan Behrens442a4f62012-05-25 16:06:08 +02001263 BTRFS_DEV_STAT_CORRUPTION_ERRS);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001264 }
1265
Ilya Dryomov33ef30a2013-11-03 19:06:38 +02001266 if (sctx->readonly) {
1267 ASSERT(!sctx->is_dev_replace);
1268 goto out;
1269 }
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001270
1271 if (!is_metadata && !have_csum) {
1272 struct scrub_fixup_nodatasum *fixup_nodatasum;
1273
Stefan Behrensff023aa2012-11-06 11:43:11 +01001274 WARN_ON(sctx->is_dev_replace);
1275
Zhao Leib25c94c2015-01-20 15:11:35 +08001276nodatasum_case:
1277
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001278 /*
1279		 * !is_metadata and !have_csum mean that the data
Nicholas D Steeves01327612016-05-19 21:18:45 -04001280		 * might not be COWed, i.e. it might be modified
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001281 * concurrently. The general strategy to work on the
1282 * commit root does not help in the case when COW is not
1283 * used.
1284 */
1285 fixup_nodatasum = kzalloc(sizeof(*fixup_nodatasum), GFP_NOFS);
1286 if (!fixup_nodatasum)
1287 goto did_not_correct_error;
Stefan Behrensd9d181c2012-11-02 09:58:09 +01001288 fixup_nodatasum->sctx = sctx;
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01001289 fixup_nodatasum->dev = dev;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001290 fixup_nodatasum->logical = logical;
1291 fixup_nodatasum->root = fs_info->extent_root;
1292 fixup_nodatasum->mirror_num = failed_mirror_index + 1;
Stefan Behrensb6bfebc2012-11-02 16:44:58 +01001293 scrub_pending_trans_workers_inc(sctx);
Liu Bo9e0af232014-08-15 23:36:53 +08001294 btrfs_init_work(&fixup_nodatasum->work, btrfs_scrub_helper,
1295 scrub_fixup_nodatasum, NULL, NULL);
Qu Wenruo0339ef22014-02-28 10:46:17 +08001296 btrfs_queue_work(fs_info->scrub_workers,
1297 &fixup_nodatasum->work);
Arne Jansena2de7332011-03-08 14:14:00 +01001298 goto out;
1299 }
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001300
1301 /*
1302 * now build and submit the bios for the other mirrors, check
Stefan Behrenscb2ced72012-11-02 16:14:21 +01001303 * checksums.
1304 * First try to pick the mirror which is completely without I/O
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001305 * errors and also does not have a checksum error.
1306 * If one is found, and if a checksum is present, the full block
1307 * that is known to contain an error is rewritten. Afterwards
1308 * the block is known to be corrected.
1309 * If a mirror is found which is completely correct, and no
1310 * checksum is present, only those pages are rewritten that had
1311 * an I/O error in the block to be repaired, since it cannot be
1312 * determined, which copy of the other pages is better (and it
1313 * could happen otherwise that a correct page would be
1314 * overwritten by a bad one).
1315 */
Liu Bo762221f2018-01-02 13:36:42 -07001316	for (mirror_index = 0; ; mirror_index++) {
Stefan Behrenscb2ced72012-11-02 16:14:21 +01001317 struct scrub_block *sblock_other;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001318
Stefan Behrenscb2ced72012-11-02 16:14:21 +01001319 if (mirror_index == failed_mirror_index)
1320 continue;
Liu Bo762221f2018-01-02 13:36:42 -07001321
1322 /* raid56's mirror can be more than BTRFS_MAX_MIRRORS */
1323 if (!scrub_is_page_on_raid56(sblock_bad->pagev[0])) {
1324 if (mirror_index >= BTRFS_MAX_MIRRORS)
1325 break;
1326 if (!sblocks_for_recheck[mirror_index].page_count)
1327 break;
1328
1329 sblock_other = sblocks_for_recheck + mirror_index;
1330 } else {
1331 struct scrub_recover *r = sblock_bad->pagev[0]->recover;
1332 int max_allowed = r->bbio->num_stripes -
1333 r->bbio->num_tgtdevs;
1334
1335 if (mirror_index >= max_allowed)
1336 break;
1337 if (!sblocks_for_recheck[1].page_count)
1338 break;
1339
1340 ASSERT(failed_mirror_index == 0);
1341 sblock_other = sblocks_for_recheck + 1;
1342 sblock_other->pagev[0]->mirror_num = 1 + mirror_index;
1343 }
Stefan Behrenscb2ced72012-11-02 16:14:21 +01001344
1345 /* build and submit the bios, check checksums */
Zhao Leiaffe4a52015-08-24 21:32:06 +08001346 scrub_recheck_block(fs_info, sblock_other, 0);
Stefan Behrens34f5c8e2012-11-02 16:16:26 +01001347
1348 if (!sblock_other->header_error &&
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001349 !sblock_other->checksum_error &&
1350 sblock_other->no_io_error_seen) {
Stefan Behrensff023aa2012-11-06 11:43:11 +01001351 if (sctx->is_dev_replace) {
1352 scrub_write_block_to_dev_replace(sblock_other);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001353 goto corrected_error;
Zhao Lei114ab502015-01-20 15:11:36 +08001354 } else {
1355 ret = scrub_repair_block_from_good_copy(
1356 sblock_bad, sblock_other);
1357 if (!ret)
1358 goto corrected_error;
1359 }
Arne Jansena2de7332011-03-08 14:14:00 +01001360 }
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001361 }
1362
Zhao Leib968fed2015-01-20 15:11:41 +08001363 if (sblock_bad->no_io_error_seen && !sctx->is_dev_replace)
1364 goto did_not_correct_error;
Stefan Behrensff023aa2012-11-06 11:43:11 +01001365
1366 /*
Stefan Behrensff023aa2012-11-06 11:43:11 +01001367 * In case of I/O errors in the area that is supposed to be
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001368 * repaired, continue by picking good copies of those pages.
1369 * Select the good pages from mirrors to rewrite bad pages from
1370 * the area to fix. Afterwards verify the checksum of the block
1371 * that is supposed to be repaired. This verification step is
1372	 * only done for the purpose of statistics counting and for the
1373	 * final scrub report on whether errors remain.
1374 * A perfect algorithm could make use of the checksum and try
1375 * all possible combinations of pages from the different mirrors
1376 * until the checksum verification succeeds. For example, when
1377 * the 2nd page of mirror #1 faces I/O errors, and the 2nd page
1378 * of mirror #2 is readable but the final checksum test fails,
1379	 * then the 2nd page of mirror #3 could be tried, to see
Nicholas D Steeves01327612016-05-19 21:18:45 -04001380	 * whether the final checksum then succeeds. But this would be a
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001381	 * rare exception and is therefore not implemented. At least the
1382	 * good copy is never overwritten.
1383	 * A more useful improvement would be to pick the sectors
1384	 * without I/O error based on sector sizes (512 bytes on legacy
1385	 * disks) instead of on PAGE_SIZE. Then maybe 512 bytes of one
1386	 * mirror could be repaired by taking 512 bytes of a different
1387	 * mirror, even if other 512-byte sectors in the same PAGE_SIZE
1388	 * area are unreadable.
1389 */
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001390 success = 1;
Zhao Leib968fed2015-01-20 15:11:41 +08001391 for (page_num = 0; page_num < sblock_bad->page_count;
1392 page_num++) {
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001393 struct scrub_page *page_bad = sblock_bad->pagev[page_num];
Zhao Leib968fed2015-01-20 15:11:41 +08001394 struct scrub_block *sblock_other = NULL;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001395
Zhao Leib968fed2015-01-20 15:11:41 +08001396 /* skip no-io-error page in scrub */
1397 if (!page_bad->io_error && !sctx->is_dev_replace)
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001398 continue;
1399
Liu Bo47597002018-03-02 16:10:41 -07001400 if (scrub_is_page_on_raid56(sblock_bad->pagev[0])) {
1401 /*
1402 * In case of dev replace, if raid56 rebuild process
1403 * didn't work out correct data, then copy the content
1404 * in sblock_bad to make sure target device is identical
1405 * to source device, instead of writing garbage data in
1406 * sblock_for_recheck array to target device.
1407 */
1408 sblock_other = NULL;
1409 } else if (page_bad->io_error) {
1410 /* try to find no-io-error page in mirrors */
Zhao Leib968fed2015-01-20 15:11:41 +08001411 for (mirror_index = 0;
1412 mirror_index < BTRFS_MAX_MIRRORS &&
1413 sblocks_for_recheck[mirror_index].page_count > 0;
1414 mirror_index++) {
1415 if (!sblocks_for_recheck[mirror_index].
1416 pagev[page_num]->io_error) {
1417 sblock_other = sblocks_for_recheck +
1418 mirror_index;
1419 break;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001420 }
Jan Schmidt13db62b2011-06-13 19:56:13 +02001421 }
Zhao Leib968fed2015-01-20 15:11:41 +08001422 if (!sblock_other)
1423 success = 0;
Jan Schmidt13db62b2011-06-13 19:56:13 +02001424 }
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001425
Zhao Leib968fed2015-01-20 15:11:41 +08001426 if (sctx->is_dev_replace) {
1427 /*
1428 * did not find a mirror to fetch the page
1429 * from. scrub_write_page_to_dev_replace()
1430	 * handles this case (page->io_error) by
1431 * filling the block with zeros before
1432 * submitting the write request
1433 */
1434 if (!sblock_other)
1435 sblock_other = sblock_bad;
1436
1437 if (scrub_write_page_to_dev_replace(sblock_other,
1438 page_num) != 0) {
1439 btrfs_dev_replace_stats_inc(
Jeff Mahoney0b246af2016-06-22 18:54:23 -04001440 &fs_info->dev_replace.num_write_errors);
Zhao Leib968fed2015-01-20 15:11:41 +08001441 success = 0;
1442 }
1443 } else if (sblock_other) {
1444 ret = scrub_repair_page_from_good_copy(sblock_bad,
1445 sblock_other,
1446 page_num, 0);
1447 			if (!ret)
1448 page_bad->io_error = 0;
1449 else
1450 success = 0;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001451 }
1452 }
1453
Zhao Leib968fed2015-01-20 15:11:41 +08001454 if (success && !sctx->is_dev_replace) {
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001455 if (is_metadata || have_csum) {
1456 /*
1457 * need to verify the checksum now that all
1458 * sectors on disk are repaired (the write
1459 * request for data to be repaired is on its way).
1460 * Just be lazy and use scrub_recheck_block()
1461 * which re-reads the data before the checksum
1462 * is verified, but most likely the data comes out
1463 * of the page cache.
1464 */
Zhao Leiaffe4a52015-08-24 21:32:06 +08001465 scrub_recheck_block(fs_info, sblock_bad, 1);
Stefan Behrens34f5c8e2012-11-02 16:16:26 +01001466 if (!sblock_bad->header_error &&
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001467 !sblock_bad->checksum_error &&
1468 sblock_bad->no_io_error_seen)
1469 goto corrected_error;
1470 else
1471 goto did_not_correct_error;
1472 } else {
1473corrected_error:
Stefan Behrensd9d181c2012-11-02 09:58:09 +01001474 spin_lock(&sctx->stat_lock);
1475 sctx->stat.corrected_errors++;
Miao Xie5a6ac9e2014-11-06 17:20:58 +08001476 sblock_to_check->data_corrected = 1;
Stefan Behrensd9d181c2012-11-02 09:58:09 +01001477 spin_unlock(&sctx->stat_lock);
David Sterbab14af3b2015-10-08 10:43:10 +02001478 btrfs_err_rl_in_rcu(fs_info,
1479 "fixed up error at logical %llu on dev %s",
Geert Uytterhoevenc1c9ff72013-08-20 13:20:07 +02001480 logical, rcu_str_deref(dev->name));
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001481 }
1482 } else {
1483did_not_correct_error:
Stefan Behrensd9d181c2012-11-02 09:58:09 +01001484 spin_lock(&sctx->stat_lock);
1485 sctx->stat.uncorrectable_errors++;
1486 spin_unlock(&sctx->stat_lock);
David Sterbab14af3b2015-10-08 10:43:10 +02001487 btrfs_err_rl_in_rcu(fs_info,
1488 "unable to fixup (regular) error at logical %llu on dev %s",
Geert Uytterhoevenc1c9ff72013-08-20 13:20:07 +02001489 logical, rcu_str_deref(dev->name));
Arne Jansena2de7332011-03-08 14:14:00 +01001490 }
1491
1492out:
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001493 if (sblocks_for_recheck) {
1494 for (mirror_index = 0; mirror_index < BTRFS_MAX_MIRRORS;
1495 mirror_index++) {
1496 struct scrub_block *sblock = sblocks_for_recheck +
1497 mirror_index;
Miao Xieaf8e2d12014-10-23 14:42:50 +08001498 struct scrub_recover *recover;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001499 int page_index;
1500
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001501 for (page_index = 0; page_index < sblock->page_count;
1502 page_index++) {
1503 sblock->pagev[page_index]->sblock = NULL;
Miao Xieaf8e2d12014-10-23 14:42:50 +08001504 recover = sblock->pagev[page_index]->recover;
1505 if (recover) {
Qu Wenruoe501bfe2017-03-29 09:33:22 +08001506 scrub_put_recover(fs_info, recover);
Miao Xieaf8e2d12014-10-23 14:42:50 +08001507 sblock->pagev[page_index]->recover =
1508 NULL;
1509 }
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001510 scrub_page_put(sblock->pagev[page_index]);
1511 }
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001512 }
1513 kfree(sblocks_for_recheck);
1514 }
1515
Qu Wenruo28d70e22017-04-14 08:35:55 +08001516 ret = unlock_full_stripe(fs_info, logical, full_stripe_locked);
1517 if (ret < 0)
1518 return ret;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001519 return 0;
Arne Jansena2de7332011-03-08 14:14:00 +01001520}
1521
Zhao Lei8e5cfb52015-01-20 15:11:33 +08001522static inline int scrub_nr_raid_mirrors(struct btrfs_bio *bbio)
Miao Xieaf8e2d12014-10-23 14:42:50 +08001523{
Zhao Lei10f11902015-01-20 15:11:43 +08001524 if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID5)
1525 return 2;
1526 else if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID6)
1527 return 3;
1528 else
Miao Xieaf8e2d12014-10-23 14:42:50 +08001529 return (int)bbio->num_stripes;
Miao Xieaf8e2d12014-10-23 14:42:50 +08001530}
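
/*
 * Illustrative sketch: the count returned above is the number of ways
 * the data can be obtained, not the number of physical copies. A RAID5
 * stripe can be read directly or rebuilt once from parity (2); RAID6
 * adds a second parity (3). Callers are expected to clamp the value,
 * as scrub_setup_recheck_block() below does:
 *
 *	nmirrors = min(scrub_nr_raid_mirrors(bbio), BTRFS_MAX_MIRRORS);
 */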
1531
Zhao Lei10f11902015-01-20 15:11:43 +08001532static inline void scrub_stripe_index_and_offset(u64 logical, u64 map_type,
1533 u64 *raid_map,
Miao Xieaf8e2d12014-10-23 14:42:50 +08001534 u64 mapped_length,
1535 int nstripes, int mirror,
1536 int *stripe_index,
1537 u64 *stripe_offset)
1538{
1539 int i;
1540
Zhao Leiffe2d202015-01-20 15:11:44 +08001541 if (map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
Miao Xieaf8e2d12014-10-23 14:42:50 +08001542 /* RAID5/6 */
1543 for (i = 0; i < nstripes; i++) {
1544 if (raid_map[i] == RAID6_Q_STRIPE ||
1545 raid_map[i] == RAID5_P_STRIPE)
1546 continue;
1547
1548 if (logical >= raid_map[i] &&
1549 logical < raid_map[i] + mapped_length)
1550 break;
1551 }
1552
1553 *stripe_index = i;
1554 *stripe_offset = logical - raid_map[i];
1555 } else {
1556 /* The other RAID type */
1557 *stripe_index = mirror;
1558 *stripe_offset = 0;
1559 }
1560}
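
/*
 * Worked example with hypothetical values: given a RAID5 map where
 * raid_map = { SZ_1M, SZ_1M + SZ_64K, RAID5_P_STRIPE }, mapped_length
 * is SZ_64K and logical is SZ_1M + 80 * SZ_1K, the loop above skips
 * the parity slot, matches i == 1 and returns *stripe_index = 1 with
 * *stripe_offset = SZ_16K. For the non-RAID56 profiles the result is
 * simply *stripe_index = mirror and *stripe_offset = 0.
 */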
1561
Zhao Leibe50a8d2015-01-20 15:11:42 +08001562static int scrub_setup_recheck_block(struct scrub_block *original_sblock,
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001563 struct scrub_block *sblocks_for_recheck)
Arne Jansena2de7332011-03-08 14:14:00 +01001564{
Zhao Leibe50a8d2015-01-20 15:11:42 +08001565 struct scrub_ctx *sctx = original_sblock->sctx;
Jeff Mahoneyfb456252016-06-22 18:54:56 -04001566 struct btrfs_fs_info *fs_info = sctx->fs_info;
Zhao Leibe50a8d2015-01-20 15:11:42 +08001567 u64 length = original_sblock->page_count * PAGE_SIZE;
1568 u64 logical = original_sblock->pagev[0]->logical;
Zhao Lei4734b7e2015-08-19 22:39:18 +08001569 u64 generation = original_sblock->pagev[0]->generation;
1570 u64 flags = original_sblock->pagev[0]->flags;
1571 u64 have_csum = original_sblock->pagev[0]->have_csum;
Miao Xieaf8e2d12014-10-23 14:42:50 +08001572 struct scrub_recover *recover;
1573 struct btrfs_bio *bbio;
Miao Xieaf8e2d12014-10-23 14:42:50 +08001574 u64 sublen;
1575 u64 mapped_length;
1576 u64 stripe_offset;
1577 int stripe_index;
Zhao Leibe50a8d2015-01-20 15:11:42 +08001578 int page_index = 0;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001579 int mirror_index;
Miao Xieaf8e2d12014-10-23 14:42:50 +08001580 int nmirrors;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001581 int ret;
1582
1583 /*
Zhao Lei57019342015-01-20 15:11:45 +08001584 * note: the two members refs and outstanding_pages
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001585 * are not used (and not set) in the blocks that are used for
1586 * the recheck procedure
1587 */
1588
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001589 while (length > 0) {
Miao Xieaf8e2d12014-10-23 14:42:50 +08001590 sublen = min_t(u64, length, PAGE_SIZE);
1591 mapped_length = sublen;
1592 bbio = NULL;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001593
1594 /*
1595 * with a length of PAGE_SIZE, each returned stripe
1596 * represents one mirror
1597 */
Qu Wenruoe501bfe2017-03-29 09:33:22 +08001598 btrfs_bio_counter_inc_blocked(fs_info);
Christoph Hellwigcf8cddd2016-10-27 09:27:36 +02001599 ret = btrfs_map_sblock(fs_info, BTRFS_MAP_GET_READ_MIRRORS,
David Sterba825ad4c2017-03-28 14:45:22 +02001600 logical, &mapped_length, &bbio);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001601 if (ret || !bbio || mapped_length < sublen) {
Zhao Lei6e9606d2015-01-20 15:11:34 +08001602 btrfs_put_bbio(bbio);
Qu Wenruoe501bfe2017-03-29 09:33:22 +08001603 btrfs_bio_counter_dec(fs_info);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001604 return -EIO;
1605 }
1606
Miao Xieaf8e2d12014-10-23 14:42:50 +08001607 recover = kzalloc(sizeof(struct scrub_recover), GFP_NOFS);
1608 if (!recover) {
Zhao Lei6e9606d2015-01-20 15:11:34 +08001609 btrfs_put_bbio(bbio);
Qu Wenruoe501bfe2017-03-29 09:33:22 +08001610 btrfs_bio_counter_dec(fs_info);
Miao Xieaf8e2d12014-10-23 14:42:50 +08001611 return -ENOMEM;
1612 }
1613
Elena Reshetova6f615012017-03-03 10:55:21 +02001614 refcount_set(&recover->refs, 1);
Miao Xieaf8e2d12014-10-23 14:42:50 +08001615 recover->bbio = bbio;
Miao Xieaf8e2d12014-10-23 14:42:50 +08001616 recover->map_length = mapped_length;
1617
Ashish Samant24731142016-04-29 18:33:59 -07001618 BUG_ON(page_index >= SCRUB_MAX_PAGES_PER_BLOCK);
Miao Xieaf8e2d12014-10-23 14:42:50 +08001619
Zhao Leibe50a8d2015-01-20 15:11:42 +08001620 nmirrors = min(scrub_nr_raid_mirrors(bbio), BTRFS_MAX_MIRRORS);
Zhao Lei10f11902015-01-20 15:11:43 +08001621
Miao Xieaf8e2d12014-10-23 14:42:50 +08001622 for (mirror_index = 0; mirror_index < nmirrors;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001623 mirror_index++) {
1624 struct scrub_block *sblock;
1625 struct scrub_page *page;
1626
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001627 sblock = sblocks_for_recheck + mirror_index;
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001628 sblock->sctx = sctx;
Zhao Lei4734b7e2015-08-19 22:39:18 +08001629
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001630 page = kzalloc(sizeof(*page), GFP_NOFS);
1631 if (!page) {
1632leave_nomem:
Stefan Behrensd9d181c2012-11-02 09:58:09 +01001633 spin_lock(&sctx->stat_lock);
1634 sctx->stat.malloc_errors++;
1635 spin_unlock(&sctx->stat_lock);
Qu Wenruoe501bfe2017-03-29 09:33:22 +08001636 scrub_put_recover(fs_info, recover);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001637 return -ENOMEM;
1638 }
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001639 scrub_page_get(page);
1640 sblock->pagev[page_index] = page;
Zhao Lei4734b7e2015-08-19 22:39:18 +08001641 page->sblock = sblock;
1642 page->flags = flags;
1643 page->generation = generation;
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001644 page->logical = logical;
Zhao Lei4734b7e2015-08-19 22:39:18 +08001645 page->have_csum = have_csum;
1646 if (have_csum)
1647 memcpy(page->csum,
1648 original_sblock->pagev[0]->csum,
1649 sctx->csum_size);
Miao Xieaf8e2d12014-10-23 14:42:50 +08001650
Zhao Lei10f11902015-01-20 15:11:43 +08001651 scrub_stripe_index_and_offset(logical,
1652 bbio->map_type,
1653 bbio->raid_map,
Miao Xieaf8e2d12014-10-23 14:42:50 +08001654 mapped_length,
Zhao Leie34c3302015-01-20 15:11:31 +08001655 bbio->num_stripes -
1656 bbio->num_tgtdevs,
Miao Xieaf8e2d12014-10-23 14:42:50 +08001657 mirror_index,
1658 &stripe_index,
1659 &stripe_offset);
1660 page->physical = bbio->stripes[stripe_index].physical +
1661 stripe_offset;
1662 page->dev = bbio->stripes[stripe_index].dev;
1663
Stefan Behrensff023aa2012-11-06 11:43:11 +01001664 BUG_ON(page_index >= original_sblock->page_count);
1665 page->physical_for_dev_replace =
1666 original_sblock->pagev[page_index]->
1667 physical_for_dev_replace;
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001668 /* for missing devices, dev->bdev is NULL */
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001669 page->mirror_num = mirror_index + 1;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001670 sblock->page_count++;
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001671 page->page = alloc_page(GFP_NOFS);
1672 if (!page->page)
1673 goto leave_nomem;
Miao Xieaf8e2d12014-10-23 14:42:50 +08001674
1675 scrub_get_recover(recover);
1676 page->recover = recover;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001677 }
Qu Wenruoe501bfe2017-03-29 09:33:22 +08001678 scrub_put_recover(fs_info, recover);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001679 length -= sublen;
1680 logical += sublen;
1681 page_index++;
1682 }
1683
1684 return 0;
1685}
1686
Christoph Hellwig4246a0b2015-07-20 15:29:37 +02001687static void scrub_bio_wait_endio(struct bio *bio)
Miao Xieaf8e2d12014-10-23 14:42:50 +08001688{
Liu Bob4ff5ad2017-11-30 17:26:39 -07001689 complete(bio->bi_private);
Miao Xieaf8e2d12014-10-23 14:42:50 +08001690}
1691
Miao Xieaf8e2d12014-10-23 14:42:50 +08001692static int scrub_submit_raid56_bio_wait(struct btrfs_fs_info *fs_info,
1693 struct bio *bio,
1694 struct scrub_page *page)
1695{
Liu Bob4ff5ad2017-11-30 17:26:39 -07001696 DECLARE_COMPLETION_ONSTACK(done);
Miao Xieaf8e2d12014-10-23 14:42:50 +08001697 int ret;
Liu Bo762221f2018-01-02 13:36:42 -07001698 int mirror_num;
Miao Xieaf8e2d12014-10-23 14:42:50 +08001699
Miao Xieaf8e2d12014-10-23 14:42:50 +08001700 bio->bi_iter.bi_sector = page->logical >> 9;
1701 bio->bi_private = &done;
1702 bio->bi_end_io = scrub_bio_wait_endio;
1703
Liu Bo762221f2018-01-02 13:36:42 -07001704 mirror_num = page->sblock->pagev[0]->mirror_num;
Jeff Mahoney2ff7e612016-06-22 18:54:24 -04001705 ret = raid56_parity_recover(fs_info, bio, page->recover->bbio,
Miao Xieaf8e2d12014-10-23 14:42:50 +08001706 page->recover->map_length,
Liu Bo762221f2018-01-02 13:36:42 -07001707 mirror_num, 0);
Miao Xieaf8e2d12014-10-23 14:42:50 +08001708 if (ret)
1709 return ret;
1710
Liu Bob4ff5ad2017-11-30 17:26:39 -07001711 wait_for_completion_io(&done);
1712 return blk_status_to_errno(bio->bi_status);
Miao Xieaf8e2d12014-10-23 14:42:50 +08001713}
1714
Liu Bo6ca17652018-03-07 12:08:09 -07001715static void scrub_recheck_block_on_raid56(struct btrfs_fs_info *fs_info,
1716 struct scrub_block *sblock)
1717{
1718 struct scrub_page *first_page = sblock->pagev[0];
1719 struct bio *bio;
1720 int page_num;
1721
1722 /* All pages in sblock belong to the same stripe on the same device. */
1723 ASSERT(first_page->dev);
1724 if (!first_page->dev->bdev)
1725 goto out;
1726
1727 bio = btrfs_io_bio_alloc(BIO_MAX_PAGES);
1728 bio_set_dev(bio, first_page->dev->bdev);
1729
1730 for (page_num = 0; page_num < sblock->page_count; page_num++) {
1731 struct scrub_page *page = sblock->pagev[page_num];
1732
1733 WARN_ON(!page->page);
1734 bio_add_page(bio, page->page, PAGE_SIZE, 0);
1735 }
1736
1737 if (scrub_submit_raid56_bio_wait(fs_info, bio, first_page)) {
1738 bio_put(bio);
1739 goto out;
1740 }
1741
1742 bio_put(bio);
1743
1744 scrub_recheck_block_checksum(sblock);
1745
1746 return;
1747out:
1748 for (page_num = 0; page_num < sblock->page_count; page_num++)
1749 sblock->pagev[page_num]->io_error = 1;
1750
1751 sblock->no_io_error_seen = 0;
1752}
1753
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001754/*
1755 * this function will check the on-disk data for checksum errors, header
1756 * errors and read I/O errors. If any I/O errors happen, the exact pages
1757 * which are errored are marked as being bad. The goal is to enable scrub
1758 * to take those pages that are not errored from all the mirrors so that
1759 * the pages that are errored in the just handled mirror can be repaired.
1760 */
Stefan Behrens34f5c8e2012-11-02 16:16:26 +01001761static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
Zhao Leiaffe4a52015-08-24 21:32:06 +08001762 struct scrub_block *sblock,
1763 int retry_failed_mirror)
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001764{
1765 int page_num;
1766
1767 sblock->no_io_error_seen = 1;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001768
Liu Bo6ca17652018-03-07 12:08:09 -07001769 /* short cut for raid56 */
1770 if (!retry_failed_mirror && scrub_is_page_on_raid56(sblock->pagev[0]))
1771 return scrub_recheck_block_on_raid56(fs_info, sblock);
1772
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001773 for (page_num = 0; page_num < sblock->page_count; page_num++) {
1774 struct bio *bio;
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001775 struct scrub_page *page = sblock->pagev[page_num];
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001776
Stefan Behrens442a4f62012-05-25 16:06:08 +02001777 if (page->dev->bdev == NULL) {
Stefan Behrensea9947b2012-05-04 15:16:07 -04001778 page->io_error = 1;
1779 sblock->no_io_error_seen = 0;
1780 continue;
1781 }
1782
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001783 WARN_ON(!page->page);
David Sterbac5e4c3d2017-06-12 17:29:41 +02001784 bio = btrfs_io_bio_alloc(1);
Christoph Hellwig74d46992017-08-23 19:10:32 +02001785 bio_set_dev(bio, page->dev->bdev);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001786
Stefan Behrens34f5c8e2012-11-02 16:16:26 +01001787 bio_add_page(bio, page->page, PAGE_SIZE, 0);
Liu Bo6ca17652018-03-07 12:08:09 -07001788 bio->bi_iter.bi_sector = page->physical >> 9;
1789 bio->bi_opf = REQ_OP_READ;
Miao Xieaf8e2d12014-10-23 14:42:50 +08001790
Liu Bo6ca17652018-03-07 12:08:09 -07001791 if (btrfsic_submit_bio_wait(bio)) {
1792 page->io_error = 1;
1793 sblock->no_io_error_seen = 0;
Miao Xieaf8e2d12014-10-23 14:42:50 +08001794 }
Kent Overstreet33879d42013-11-23 22:33:32 -08001795
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001796 bio_put(bio);
1797 }
1798
1799 if (sblock->no_io_error_seen)
Zhao Leiba7cf982015-08-24 21:18:02 +08001800 scrub_recheck_block_checksum(sblock);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001801}
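
/*
 * Typical call pattern, as used by scrub_handle_errored_block() above:
 *
 *	scrub_recheck_block(fs_info, sblock_bad, 1);	// failed mirror: force page-by-page reads
 *	scrub_recheck_block(fs_info, sblock_other, 0);	// other mirrors: raid56 may take the rebuild shortcut
 */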
1802
Miao Xie17a9be22014-07-24 11:37:08 +08001803static inline int scrub_check_fsid(u8 fsid[],
1804 struct scrub_page *spage)
1805{
1806 struct btrfs_fs_devices *fs_devices = spage->dev->fs_devices;
1807 int ret;
1808
Anand Jain44880fd2017-07-29 17:50:09 +08001809 ret = memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
Miao Xie17a9be22014-07-24 11:37:08 +08001810 return !ret;
1811}
1812
Zhao Leiba7cf982015-08-24 21:18:02 +08001813static void scrub_recheck_block_checksum(struct scrub_block *sblock)
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001814{
Zhao Leiba7cf982015-08-24 21:18:02 +08001815 sblock->header_error = 0;
1816 sblock->checksum_error = 0;
1817 sblock->generation_error = 0;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001818
Zhao Leiba7cf982015-08-24 21:18:02 +08001819 if (sblock->pagev[0]->flags & BTRFS_EXTENT_FLAG_DATA)
1820 scrub_checksum_data(sblock);
1821 else
1822 scrub_checksum_tree_block(sblock);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001823}
1824
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001825static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
Zhao Lei114ab502015-01-20 15:11:36 +08001826 struct scrub_block *sblock_good)
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001827{
1828 int page_num;
1829 int ret = 0;
1830
1831 for (page_num = 0; page_num < sblock_bad->page_count; page_num++) {
1832 int ret_sub;
1833
1834 ret_sub = scrub_repair_page_from_good_copy(sblock_bad,
1835 sblock_good,
Zhao Lei114ab502015-01-20 15:11:36 +08001836 page_num, 1);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001837 if (ret_sub)
1838 ret = ret_sub;
1839 }
1840
1841 return ret;
1842}
1843
1844static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
1845 struct scrub_block *sblock_good,
1846 int page_num, int force_write)
1847{
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001848 struct scrub_page *page_bad = sblock_bad->pagev[page_num];
1849 struct scrub_page *page_good = sblock_good->pagev[page_num];
Jeff Mahoney0b246af2016-06-22 18:54:23 -04001850 struct btrfs_fs_info *fs_info = sblock_bad->sctx->fs_info;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001851
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001852 BUG_ON(page_bad->page == NULL);
1853 BUG_ON(page_good->page == NULL);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001854 if (force_write || sblock_bad->header_error ||
1855 sblock_bad->checksum_error || page_bad->io_error) {
1856 struct bio *bio;
1857 int ret;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001858
Stefan Behrensff023aa2012-11-06 11:43:11 +01001859 if (!page_bad->dev->bdev) {
Jeff Mahoney0b246af2016-06-22 18:54:23 -04001860 btrfs_warn_rl(fs_info,
Jeff Mahoney5d163e02016-09-20 10:05:00 -04001861 "scrub_repair_page_from_good_copy(bdev == NULL) is unexpected");
Stefan Behrensff023aa2012-11-06 11:43:11 +01001862 return -EIO;
1863 }
1864
David Sterbac5e4c3d2017-06-12 17:29:41 +02001865 bio = btrfs_io_bio_alloc(1);
Christoph Hellwig74d46992017-08-23 19:10:32 +02001866 bio_set_dev(bio, page_bad->dev->bdev);
Kent Overstreet4f024f32013-10-11 15:44:27 -07001867 bio->bi_iter.bi_sector = page_bad->physical >> 9;
Mike Christie37226b22016-06-05 14:31:52 -05001868 bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001869
1870 ret = bio_add_page(bio, page_good->page, PAGE_SIZE, 0);
1871 if (PAGE_SIZE != ret) {
1872 bio_put(bio);
1873 return -EIO;
1874 }
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001875
Mike Christie4e49ea42016-06-05 14:31:41 -05001876 if (btrfsic_submit_bio_wait(bio)) {
Stefan Behrens442a4f62012-05-25 16:06:08 +02001877 btrfs_dev_stat_inc_and_print(page_bad->dev,
1878 BTRFS_DEV_STAT_WRITE_ERRS);
Stefan Behrensff023aa2012-11-06 11:43:11 +01001879 btrfs_dev_replace_stats_inc(
Jeff Mahoney0b246af2016-06-22 18:54:23 -04001880 &fs_info->dev_replace.num_write_errors);
Stefan Behrens442a4f62012-05-25 16:06:08 +02001881 bio_put(bio);
1882 return -EIO;
1883 }
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001884 bio_put(bio);
1885 }
1886
1887 return 0;
1888}
1889
Stefan Behrensff023aa2012-11-06 11:43:11 +01001890static void scrub_write_block_to_dev_replace(struct scrub_block *sblock)
1891{
Jeff Mahoney0b246af2016-06-22 18:54:23 -04001892 struct btrfs_fs_info *fs_info = sblock->sctx->fs_info;
Stefan Behrensff023aa2012-11-06 11:43:11 +01001893 int page_num;
1894
Miao Xie5a6ac9e2014-11-06 17:20:58 +08001895 /*
1896 * This block is used for the check of the parity on the source device,
1897 * so the data needn't be written into the destination device.
1898 */
1899 if (sblock->sparity)
1900 return;
1901
Stefan Behrensff023aa2012-11-06 11:43:11 +01001902 for (page_num = 0; page_num < sblock->page_count; page_num++) {
1903 int ret;
1904
1905 ret = scrub_write_page_to_dev_replace(sblock, page_num);
1906 if (ret)
1907 btrfs_dev_replace_stats_inc(
Jeff Mahoney0b246af2016-06-22 18:54:23 -04001908 &fs_info->dev_replace.num_write_errors);
Stefan Behrensff023aa2012-11-06 11:43:11 +01001909 }
1910}
1911
1912static int scrub_write_page_to_dev_replace(struct scrub_block *sblock,
1913 int page_num)
1914{
1915 struct scrub_page *spage = sblock->pagev[page_num];
1916
1917 BUG_ON(spage->page == NULL);
1918 if (spage->io_error) {
1919 void *mapped_buffer = kmap_atomic(spage->page);
1920
David Sterba619a9742017-03-29 20:48:44 +02001921 clear_page(mapped_buffer);
Stefan Behrensff023aa2012-11-06 11:43:11 +01001922 flush_dcache_page(spage->page);
1923 kunmap_atomic(mapped_buffer);
1924 }
1925 return scrub_add_page_to_wr_bio(sblock->sctx, spage);
1926}
1927
1928static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
1929 struct scrub_page *spage)
1930{
Stefan Behrensff023aa2012-11-06 11:43:11 +01001931 struct scrub_bio *sbio;
1932 int ret;
1933
David Sterba3fb99302017-05-16 19:10:32 +02001934 mutex_lock(&sctx->wr_lock);
Stefan Behrensff023aa2012-11-06 11:43:11 +01001935again:
David Sterba3fb99302017-05-16 19:10:32 +02001936 if (!sctx->wr_curr_bio) {
1937 sctx->wr_curr_bio = kzalloc(sizeof(*sctx->wr_curr_bio),
David Sterba58c4e172016-02-11 10:49:42 +01001938 GFP_KERNEL);
David Sterba3fb99302017-05-16 19:10:32 +02001939 if (!sctx->wr_curr_bio) {
1940 mutex_unlock(&sctx->wr_lock);
Stefan Behrensff023aa2012-11-06 11:43:11 +01001941 return -ENOMEM;
1942 }
David Sterba3fb99302017-05-16 19:10:32 +02001943 sctx->wr_curr_bio->sctx = sctx;
1944 sctx->wr_curr_bio->page_count = 0;
Stefan Behrensff023aa2012-11-06 11:43:11 +01001945 }
David Sterba3fb99302017-05-16 19:10:32 +02001946 sbio = sctx->wr_curr_bio;
Stefan Behrensff023aa2012-11-06 11:43:11 +01001947 if (sbio->page_count == 0) {
1948 struct bio *bio;
1949
1950 sbio->physical = spage->physical_for_dev_replace;
1951 sbio->logical = spage->logical;
David Sterba3fb99302017-05-16 19:10:32 +02001952 sbio->dev = sctx->wr_tgtdev;
Stefan Behrensff023aa2012-11-06 11:43:11 +01001953 bio = sbio->bio;
1954 if (!bio) {
David Sterbac5e4c3d2017-06-12 17:29:41 +02001955 bio = btrfs_io_bio_alloc(sctx->pages_per_wr_bio);
Stefan Behrensff023aa2012-11-06 11:43:11 +01001956 sbio->bio = bio;
1957 }
1958
1959 bio->bi_private = sbio;
1960 bio->bi_end_io = scrub_wr_bio_end_io;
Christoph Hellwig74d46992017-08-23 19:10:32 +02001961 bio_set_dev(bio, sbio->dev->bdev);
Kent Overstreet4f024f32013-10-11 15:44:27 -07001962 bio->bi_iter.bi_sector = sbio->physical >> 9;
Mike Christie37226b22016-06-05 14:31:52 -05001963 bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
Christoph Hellwig4e4cbee2017-06-03 09:38:06 +02001964 sbio->status = 0;
Stefan Behrensff023aa2012-11-06 11:43:11 +01001965 } else if (sbio->physical + sbio->page_count * PAGE_SIZE !=
1966 spage->physical_for_dev_replace ||
1967 sbio->logical + sbio->page_count * PAGE_SIZE !=
1968 spage->logical) {
1969 scrub_wr_submit(sctx);
1970 goto again;
1971 }
1972
1973 ret = bio_add_page(sbio->bio, spage->page, PAGE_SIZE, 0);
1974 if (ret != PAGE_SIZE) {
1975 if (sbio->page_count < 1) {
1976 bio_put(sbio->bio);
1977 sbio->bio = NULL;
David Sterba3fb99302017-05-16 19:10:32 +02001978 mutex_unlock(&sctx->wr_lock);
Stefan Behrensff023aa2012-11-06 11:43:11 +01001979 return -EIO;
1980 }
1981 scrub_wr_submit(sctx);
1982 goto again;
1983 }
1984
1985 sbio->pagev[sbio->page_count] = spage;
1986 scrub_page_get(spage);
1987 sbio->page_count++;
David Sterba3fb99302017-05-16 19:10:32 +02001988 if (sbio->page_count == sctx->pages_per_wr_bio)
Stefan Behrensff023aa2012-11-06 11:43:11 +01001989 scrub_wr_submit(sctx);
David Sterba3fb99302017-05-16 19:10:32 +02001990 mutex_unlock(&sctx->wr_lock);
Stefan Behrensff023aa2012-11-06 11:43:11 +01001991
1992 return 0;
1993}
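
/*
 * Batching sketch, assuming 4K pages and a 32-page write bio (both are
 * assumptions; they depend on the architecture and on how sctx was set
 * up): pages are appended to sctx->wr_curr_bio while both the target
 * physical address and the logical address stay contiguous, so one bio
 * can cover up to 128K. Any discontiguity flushes the bio and retries
 * via the "again:" label, and a full bio is flushed immediately
 * through scrub_wr_submit().
 */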
1994
1995static void scrub_wr_submit(struct scrub_ctx *sctx)
1996{
Stefan Behrensff023aa2012-11-06 11:43:11 +01001997 struct scrub_bio *sbio;
1998
David Sterba3fb99302017-05-16 19:10:32 +02001999 if (!sctx->wr_curr_bio)
Stefan Behrensff023aa2012-11-06 11:43:11 +01002000 return;
2001
David Sterba3fb99302017-05-16 19:10:32 +02002002 sbio = sctx->wr_curr_bio;
2003 sctx->wr_curr_bio = NULL;
Christoph Hellwig74d46992017-08-23 19:10:32 +02002004 WARN_ON(!sbio->bio->bi_disk);
Stefan Behrensff023aa2012-11-06 11:43:11 +01002005 scrub_pending_bio_inc(sctx);
2006	/* Process all writes in a single worker thread. Then the block layer
2007	 * orders the requests before sending them to the driver; this
2008	 * doubled the write performance on spinning disks when measured
2009	 * with Linux 3.5. */
Mike Christie4e49ea42016-06-05 14:31:41 -05002010 btrfsic_submit_bio(sbio->bio);
Stefan Behrensff023aa2012-11-06 11:43:11 +01002011}
2012
Christoph Hellwig4246a0b2015-07-20 15:29:37 +02002013static void scrub_wr_bio_end_io(struct bio *bio)
Stefan Behrensff023aa2012-11-06 11:43:11 +01002014{
2015 struct scrub_bio *sbio = bio->bi_private;
Jeff Mahoneyfb456252016-06-22 18:54:56 -04002016 struct btrfs_fs_info *fs_info = sbio->dev->fs_info;
Stefan Behrensff023aa2012-11-06 11:43:11 +01002017
Christoph Hellwig4e4cbee2017-06-03 09:38:06 +02002018 sbio->status = bio->bi_status;
Stefan Behrensff023aa2012-11-06 11:43:11 +01002019 sbio->bio = bio;
2020
Liu Bo9e0af232014-08-15 23:36:53 +08002021 btrfs_init_work(&sbio->work, btrfs_scrubwrc_helper,
2022 scrub_wr_bio_end_io_worker, NULL, NULL);
Qu Wenruo0339ef22014-02-28 10:46:17 +08002023 btrfs_queue_work(fs_info->scrub_wr_completion_workers, &sbio->work);
Stefan Behrensff023aa2012-11-06 11:43:11 +01002024}
2025
2026static void scrub_wr_bio_end_io_worker(struct btrfs_work *work)
2027{
2028 struct scrub_bio *sbio = container_of(work, struct scrub_bio, work);
2029 struct scrub_ctx *sctx = sbio->sctx;
2030 int i;
2031
2032 WARN_ON(sbio->page_count > SCRUB_PAGES_PER_WR_BIO);
Christoph Hellwig4e4cbee2017-06-03 09:38:06 +02002033 if (sbio->status) {
Stefan Behrensff023aa2012-11-06 11:43:11 +01002034 struct btrfs_dev_replace *dev_replace =
Jeff Mahoneyfb456252016-06-22 18:54:56 -04002035 &sbio->sctx->fs_info->dev_replace;
Stefan Behrensff023aa2012-11-06 11:43:11 +01002036
2037 for (i = 0; i < sbio->page_count; i++) {
2038 struct scrub_page *spage = sbio->pagev[i];
2039
2040 spage->io_error = 1;
2041 btrfs_dev_replace_stats_inc(&dev_replace->
2042 num_write_errors);
2043 }
2044 }
2045
2046 for (i = 0; i < sbio->page_count; i++)
2047 scrub_page_put(sbio->pagev[i]);
2048
2049 bio_put(sbio->bio);
2050 kfree(sbio);
2051 scrub_pending_bio_dec(sctx);
2052}
2053
2054static int scrub_checksum(struct scrub_block *sblock)
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002055{
2056 u64 flags;
2057 int ret;
2058
Zhao Leiba7cf982015-08-24 21:18:02 +08002059 /*
2060 * No need to initialize these stats currently,
2061	 * because this function only uses the return value
2062	 * instead of these stats values.
2063 *
2064 * Todo:
2065 * always use stats
2066 */
2067 sblock->header_error = 0;
2068 sblock->generation_error = 0;
2069 sblock->checksum_error = 0;
2070
Stefan Behrens7a9e9982012-11-02 14:58:04 +01002071 WARN_ON(sblock->page_count < 1);
2072 flags = sblock->pagev[0]->flags;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002073 ret = 0;
2074 if (flags & BTRFS_EXTENT_FLAG_DATA)
2075 ret = scrub_checksum_data(sblock);
2076 else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
2077 ret = scrub_checksum_tree_block(sblock);
2078 else if (flags & BTRFS_EXTENT_FLAG_SUPER)
2079 (void)scrub_checksum_super(sblock);
2080 else
2081 WARN_ON(1);
2082 if (ret)
2083 scrub_handle_errored_block(sblock);
Stefan Behrensff023aa2012-11-06 11:43:11 +01002084
2085 return ret;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002086}
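
/*
 * Flow sketch for the dispatch above: flags come from pagev[0]->flags,
 * so e.g. a block carrying BTRFS_EXTENT_FLAG_TREE_BLOCK is verified by
 * scrub_checksum_tree_block(), and any non-zero result is handed to
 * scrub_handle_errored_block() for repair.
 */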
2087
2088static int scrub_checksum_data(struct scrub_block *sblock)
2089{
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002090 struct scrub_ctx *sctx = sblock->sctx;
Arne Jansena2de7332011-03-08 14:14:00 +01002091 u8 csum[BTRFS_CSUM_SIZE];
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002092 u8 *on_disk_csum;
2093 struct page *page;
2094 void *buffer;
Arne Jansena2de7332011-03-08 14:14:00 +01002095 u32 crc = ~(u32)0;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002096 u64 len;
2097 int index;
Arne Jansena2de7332011-03-08 14:14:00 +01002098
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002099 BUG_ON(sblock->page_count < 1);
Stefan Behrens7a9e9982012-11-02 14:58:04 +01002100 if (!sblock->pagev[0]->have_csum)
Arne Jansena2de7332011-03-08 14:14:00 +01002101 return 0;
2102
Stefan Behrens7a9e9982012-11-02 14:58:04 +01002103 on_disk_csum = sblock->pagev[0]->csum;
2104 page = sblock->pagev[0]->page;
Linus Torvalds9613beb2012-03-30 12:44:29 -07002105 buffer = kmap_atomic(page);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002106
David Sterba25cc1222017-05-16 19:10:41 +02002107 len = sctx->fs_info->sectorsize;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002108 index = 0;
2109 for (;;) {
2110 u64 l = min_t(u64, len, PAGE_SIZE);
2111
Liu Bob0496682013-03-14 14:57:45 +00002112 crc = btrfs_csum_data(buffer, crc, l);
Linus Torvalds9613beb2012-03-30 12:44:29 -07002113 kunmap_atomic(buffer);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002114 len -= l;
2115 if (len == 0)
2116 break;
2117 index++;
2118 BUG_ON(index >= sblock->page_count);
Stefan Behrens7a9e9982012-11-02 14:58:04 +01002119 BUG_ON(!sblock->pagev[index]->page);
2120 page = sblock->pagev[index]->page;
Linus Torvalds9613beb2012-03-30 12:44:29 -07002121 buffer = kmap_atomic(page);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002122 }
2123
Arne Jansena2de7332011-03-08 14:14:00 +01002124 btrfs_csum_final(crc, csum);
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002125 if (memcmp(csum, on_disk_csum, sctx->csum_size))
Zhao Leiba7cf982015-08-24 21:18:02 +08002126 sblock->checksum_error = 1;
Arne Jansena2de7332011-03-08 14:14:00 +01002127
Zhao Leiba7cf982015-08-24 21:18:02 +08002128 return sblock->checksum_error;
Arne Jansena2de7332011-03-08 14:14:00 +01002129}
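
/*
 * Loop sketch for scrub_checksum_data() above, assuming sectorsize ==
 * PAGE_SIZE (the common case): len starts at one sector, so the
 * for (;;) body runs exactly once, feeding the whole page into
 * btrfs_csum_data() before btrfs_csum_final() folds the running crc
 * into csum[] for the memcmp against the on-disk checksum. A larger
 * sectorsize would simply walk further pagev[] entries.
 */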
2130
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002131static int scrub_checksum_tree_block(struct scrub_block *sblock)
Arne Jansena2de7332011-03-08 14:14:00 +01002132{
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002133 struct scrub_ctx *sctx = sblock->sctx;
Arne Jansena2de7332011-03-08 14:14:00 +01002134 struct btrfs_header *h;
Jeff Mahoney0b246af2016-06-22 18:54:23 -04002135 struct btrfs_fs_info *fs_info = sctx->fs_info;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002136 u8 calculated_csum[BTRFS_CSUM_SIZE];
2137 u8 on_disk_csum[BTRFS_CSUM_SIZE];
2138 struct page *page;
2139 void *mapped_buffer;
2140 u64 mapped_size;
2141 void *p;
Arne Jansena2de7332011-03-08 14:14:00 +01002142 u32 crc = ~(u32)0;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002143 u64 len;
2144 int index;
2145
2146 BUG_ON(sblock->page_count < 1);
Stefan Behrens7a9e9982012-11-02 14:58:04 +01002147 page = sblock->pagev[0]->page;
Linus Torvalds9613beb2012-03-30 12:44:29 -07002148 mapped_buffer = kmap_atomic(page);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002149 h = (struct btrfs_header *)mapped_buffer;
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002150 memcpy(on_disk_csum, h->csum, sctx->csum_size);
Arne Jansena2de7332011-03-08 14:14:00 +01002151
2152 /*
2153 * we don't use the getter functions here, as we
2154 * a) don't have an extent buffer and
2155 * b) the page is already kmapped
2156 */
Qu Wenruo3cae2102013-07-16 11:19:18 +08002157 if (sblock->pagev[0]->logical != btrfs_stack_header_bytenr(h))
Zhao Leiba7cf982015-08-24 21:18:02 +08002158 sblock->header_error = 1;
Arne Jansena2de7332011-03-08 14:14:00 +01002159
Zhao Leiba7cf982015-08-24 21:18:02 +08002160 if (sblock->pagev[0]->generation != btrfs_stack_header_generation(h)) {
2161 sblock->header_error = 1;
2162 sblock->generation_error = 1;
2163 }
Arne Jansena2de7332011-03-08 14:14:00 +01002164
Miao Xie17a9be22014-07-24 11:37:08 +08002165 if (!scrub_check_fsid(h->fsid, sblock->pagev[0]))
Zhao Leiba7cf982015-08-24 21:18:02 +08002166 sblock->header_error = 1;
Arne Jansena2de7332011-03-08 14:14:00 +01002167
2168 if (memcmp(h->chunk_tree_uuid, fs_info->chunk_tree_uuid,
2169 BTRFS_UUID_SIZE))
Zhao Leiba7cf982015-08-24 21:18:02 +08002170 sblock->header_error = 1;
Arne Jansena2de7332011-03-08 14:14:00 +01002171
David Sterba25cc1222017-05-16 19:10:41 +02002172 len = sctx->fs_info->nodesize - BTRFS_CSUM_SIZE;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002173 mapped_size = PAGE_SIZE - BTRFS_CSUM_SIZE;
2174 p = ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE;
2175 index = 0;
2176 for (;;) {
2177 u64 l = min_t(u64, len, mapped_size);
2178
Liu Bob0496682013-03-14 14:57:45 +00002179 crc = btrfs_csum_data(p, crc, l);
Linus Torvalds9613beb2012-03-30 12:44:29 -07002180 kunmap_atomic(mapped_buffer);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002181 len -= l;
2182 if (len == 0)
2183 break;
2184 index++;
2185 BUG_ON(index >= sblock->page_count);
Stefan Behrens7a9e9982012-11-02 14:58:04 +01002186 BUG_ON(!sblock->pagev[index]->page);
2187 page = sblock->pagev[index]->page;
Linus Torvalds9613beb2012-03-30 12:44:29 -07002188 mapped_buffer = kmap_atomic(page);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002189 mapped_size = PAGE_SIZE;
2190 p = mapped_buffer;
2191 }
2192
2193 btrfs_csum_final(crc, calculated_csum);
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002194 if (memcmp(calculated_csum, on_disk_csum, sctx->csum_size))
Zhao Leiba7cf982015-08-24 21:18:02 +08002195 sblock->checksum_error = 1;
Arne Jansena2de7332011-03-08 14:14:00 +01002196
Zhao Leiba7cf982015-08-24 21:18:02 +08002197 return sblock->header_error || sblock->checksum_error;
Arne Jansena2de7332011-03-08 14:14:00 +01002198}
2199
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002200static int scrub_checksum_super(struct scrub_block *sblock)
Arne Jansena2de7332011-03-08 14:14:00 +01002201{
2202 struct btrfs_super_block *s;
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002203 struct scrub_ctx *sctx = sblock->sctx;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002204 u8 calculated_csum[BTRFS_CSUM_SIZE];
2205 u8 on_disk_csum[BTRFS_CSUM_SIZE];
2206 struct page *page;
2207 void *mapped_buffer;
2208 u64 mapped_size;
2209 void *p;
Arne Jansena2de7332011-03-08 14:14:00 +01002210 u32 crc = ~(u32)0;
Stefan Behrens442a4f62012-05-25 16:06:08 +02002211 int fail_gen = 0;
2212 int fail_cor = 0;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002213 u64 len;
2214 int index;
Arne Jansena2de7332011-03-08 14:14:00 +01002215
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002216 BUG_ON(sblock->page_count < 1);
Stefan Behrens7a9e9982012-11-02 14:58:04 +01002217 page = sblock->pagev[0]->page;
Linus Torvalds9613beb2012-03-30 12:44:29 -07002218 mapped_buffer = kmap_atomic(page);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002219 s = (struct btrfs_super_block *)mapped_buffer;
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002220 memcpy(on_disk_csum, s->csum, sctx->csum_size);
Arne Jansena2de7332011-03-08 14:14:00 +01002221
Qu Wenruo3cae2102013-07-16 11:19:18 +08002222 if (sblock->pagev[0]->logical != btrfs_super_bytenr(s))
Stefan Behrens442a4f62012-05-25 16:06:08 +02002223 ++fail_cor;
Arne Jansena2de7332011-03-08 14:14:00 +01002224
Qu Wenruo3cae2102013-07-16 11:19:18 +08002225 if (sblock->pagev[0]->generation != btrfs_super_generation(s))
Stefan Behrens442a4f62012-05-25 16:06:08 +02002226 ++fail_gen;
Arne Jansena2de7332011-03-08 14:14:00 +01002227
Miao Xie17a9be22014-07-24 11:37:08 +08002228 if (!scrub_check_fsid(s->fsid, sblock->pagev[0]))
Stefan Behrens442a4f62012-05-25 16:06:08 +02002229 ++fail_cor;
Arne Jansena2de7332011-03-08 14:14:00 +01002230
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002231 len = BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE;
2232 mapped_size = PAGE_SIZE - BTRFS_CSUM_SIZE;
2233 p = ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE;
2234 index = 0;
2235 for (;;) {
2236 u64 l = min_t(u64, len, mapped_size);
2237
Liu Bob0496682013-03-14 14:57:45 +00002238 crc = btrfs_csum_data(p, crc, l);
Linus Torvalds9613beb2012-03-30 12:44:29 -07002239 kunmap_atomic(mapped_buffer);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002240 len -= l;
2241 if (len == 0)
2242 break;
2243 index++;
2244 BUG_ON(index >= sblock->page_count);
Stefan Behrens7a9e9982012-11-02 14:58:04 +01002245 BUG_ON(!sblock->pagev[index]->page);
2246 page = sblock->pagev[index]->page;
Linus Torvalds9613beb2012-03-30 12:44:29 -07002247 mapped_buffer = kmap_atomic(page);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002248 mapped_size = PAGE_SIZE;
2249 p = mapped_buffer;
2250 }
2251
2252 btrfs_csum_final(crc, calculated_csum);
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002253 if (memcmp(calculated_csum, on_disk_csum, sctx->csum_size))
Stefan Behrens442a4f62012-05-25 16:06:08 +02002254 ++fail_cor;
Arne Jansena2de7332011-03-08 14:14:00 +01002255
Stefan Behrens442a4f62012-05-25 16:06:08 +02002256 if (fail_cor + fail_gen) {
Arne Jansena2de7332011-03-08 14:14:00 +01002257 /*
2258 * if we find an error in a super block, we just report it.
2259		 * Super blocks get rewritten with the next transaction commit
2260		 * anyway.
2261 */
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002262 spin_lock(&sctx->stat_lock);
2263 ++sctx->stat.super_errors;
2264 spin_unlock(&sctx->stat_lock);
Stefan Behrens442a4f62012-05-25 16:06:08 +02002265 if (fail_cor)
Stefan Behrens7a9e9982012-11-02 14:58:04 +01002266 btrfs_dev_stat_inc_and_print(sblock->pagev[0]->dev,
Stefan Behrens442a4f62012-05-25 16:06:08 +02002267 BTRFS_DEV_STAT_CORRUPTION_ERRS);
2268 else
Stefan Behrens7a9e9982012-11-02 14:58:04 +01002269 btrfs_dev_stat_inc_and_print(sblock->pagev[0]->dev,
Stefan Behrens442a4f62012-05-25 16:06:08 +02002270 BTRFS_DEV_STAT_GENERATION_ERRS);
Arne Jansena2de7332011-03-08 14:14:00 +01002271 }
2272
Stefan Behrens442a4f62012-05-25 16:06:08 +02002273 return fail_cor + fail_gen;
Arne Jansena2de7332011-03-08 14:14:00 +01002274}
2275
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002276static void scrub_block_get(struct scrub_block *sblock)
2277{
Elena Reshetova186debd2017-03-03 10:55:23 +02002278 refcount_inc(&sblock->refs);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002279}
2280
2281static void scrub_block_put(struct scrub_block *sblock)
2282{
Elena Reshetova186debd2017-03-03 10:55:23 +02002283 if (refcount_dec_and_test(&sblock->refs)) {
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002284 int i;
2285
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002286 if (sblock->sparity)
2287 scrub_parity_put(sblock->sparity);
2288
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002289 for (i = 0; i < sblock->page_count; i++)
Stefan Behrens7a9e9982012-11-02 14:58:04 +01002290 scrub_page_put(sblock->pagev[i]);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002291 kfree(sblock);
2292 }
2293}
2294
Stefan Behrens7a9e9982012-11-02 14:58:04 +01002295static void scrub_page_get(struct scrub_page *spage)
2296{
Zhao Lei57019342015-01-20 15:11:45 +08002297 atomic_inc(&spage->refs);
Stefan Behrens7a9e9982012-11-02 14:58:04 +01002298}
2299
2300static void scrub_page_put(struct scrub_page *spage)
2301{
Zhao Lei57019342015-01-20 15:11:45 +08002302 if (atomic_dec_and_test(&spage->refs)) {
Stefan Behrens7a9e9982012-11-02 14:58:04 +01002303 if (spage->page)
2304 __free_page(spage->page);
2305 kfree(spage);
2306 }
2307}
2308
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002309static void scrub_submit(struct scrub_ctx *sctx)
Arne Jansena2de7332011-03-08 14:14:00 +01002310{
2311 struct scrub_bio *sbio;
2312
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002313 if (sctx->curr == -1)
Stefan Behrens1623ede2012-03-27 14:21:26 -04002314 return;
Arne Jansena2de7332011-03-08 14:14:00 +01002315
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002316 sbio = sctx->bios[sctx->curr];
2317 sctx->curr = -1;
Stefan Behrensb6bfebc2012-11-02 16:44:58 +01002318 scrub_pending_bio_inc(sctx);
Mike Christie4e49ea42016-06-05 14:31:41 -05002319 btrfsic_submit_bio(sbio->bio);
Arne Jansena2de7332011-03-08 14:14:00 +01002320}
2321
Stefan Behrensff023aa2012-11-06 11:43:11 +01002322static int scrub_add_page_to_rd_bio(struct scrub_ctx *sctx,
2323 struct scrub_page *spage)
Arne Jansena2de7332011-03-08 14:14:00 +01002324{
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002325 struct scrub_block *sblock = spage->sblock;
Arne Jansena2de7332011-03-08 14:14:00 +01002326 struct scrub_bio *sbio;
Arne Jansen69f4cb52011-11-11 08:17:10 -05002327 int ret;
Arne Jansena2de7332011-03-08 14:14:00 +01002328
2329again:
2330 /*
2331 * grab a fresh bio or wait for one to become available
2332 */
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002333 while (sctx->curr == -1) {
2334 spin_lock(&sctx->list_lock);
2335 sctx->curr = sctx->first_free;
2336 if (sctx->curr != -1) {
2337 sctx->first_free = sctx->bios[sctx->curr]->next_free;
2338 sctx->bios[sctx->curr]->next_free = -1;
2339 sctx->bios[sctx->curr]->page_count = 0;
2340 spin_unlock(&sctx->list_lock);
Arne Jansena2de7332011-03-08 14:14:00 +01002341 } else {
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002342 spin_unlock(&sctx->list_lock);
2343 wait_event(sctx->list_wait, sctx->first_free != -1);
Arne Jansena2de7332011-03-08 14:14:00 +01002344 }
2345 }
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002346 sbio = sctx->bios[sctx->curr];
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002347 if (sbio->page_count == 0) {
Arne Jansen69f4cb52011-11-11 08:17:10 -05002348 struct bio *bio;
2349
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002350 sbio->physical = spage->physical;
2351 sbio->logical = spage->logical;
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01002352 sbio->dev = spage->dev;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002353 bio = sbio->bio;
2354 if (!bio) {
David Sterbac5e4c3d2017-06-12 17:29:41 +02002355 bio = btrfs_io_bio_alloc(sctx->pages_per_rd_bio);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002356 sbio->bio = bio;
2357 }
Arne Jansen69f4cb52011-11-11 08:17:10 -05002358
2359 bio->bi_private = sbio;
2360 bio->bi_end_io = scrub_bio_end_io;
Christoph Hellwig74d46992017-08-23 19:10:32 +02002361 bio_set_dev(bio, sbio->dev->bdev);
Kent Overstreet4f024f32013-10-11 15:44:27 -07002362 bio->bi_iter.bi_sector = sbio->physical >> 9;
Mike Christie37226b22016-06-05 14:31:52 -05002363 bio_set_op_attrs(bio, REQ_OP_READ, 0);
Christoph Hellwig4e4cbee2017-06-03 09:38:06 +02002364 sbio->status = 0;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002365 } else if (sbio->physical + sbio->page_count * PAGE_SIZE !=
2366 spage->physical ||
2367 sbio->logical + sbio->page_count * PAGE_SIZE !=
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01002368 spage->logical ||
2369 sbio->dev != spage->dev) {
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002370 scrub_submit(sctx);
Arne Jansen69f4cb52011-11-11 08:17:10 -05002371 goto again;
2372 }
2373
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002374 sbio->pagev[sbio->page_count] = spage;
2375 ret = bio_add_page(sbio->bio, spage->page, PAGE_SIZE, 0);
2376 if (ret != PAGE_SIZE) {
2377 if (sbio->page_count < 1) {
2378 bio_put(sbio->bio);
2379 sbio->bio = NULL;
2380 return -EIO;
2381 }
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002382 scrub_submit(sctx);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002383 goto again;
Arne Jansena2de7332011-03-08 14:14:00 +01002384 }
Arne Jansen1bc87792011-05-28 21:57:55 +02002385
Stefan Behrensff023aa2012-11-06 11:43:11 +01002386 scrub_block_get(sblock); /* one for the page added to the bio */
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002387 atomic_inc(&sblock->outstanding_pages);
2388 sbio->page_count++;
Stefan Behrensff023aa2012-11-06 11:43:11 +01002389 if (sbio->page_count == sctx->pages_per_rd_bio)
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002390 scrub_submit(sctx);
Arne Jansena2de7332011-03-08 14:14:00 +01002391
2392 return 0;
2393}
2394
Linus Torvalds22365972015-09-05 15:14:43 -07002395static void scrub_missing_raid56_end_io(struct bio *bio)
Omar Sandoval73ff61d2015-06-19 11:52:51 -07002396{
2397 struct scrub_block *sblock = bio->bi_private;
Jeff Mahoneyfb456252016-06-22 18:54:56 -04002398 struct btrfs_fs_info *fs_info = sblock->sctx->fs_info;
Omar Sandoval73ff61d2015-06-19 11:52:51 -07002399
Christoph Hellwig4e4cbee2017-06-03 09:38:06 +02002400 if (bio->bi_status)
Omar Sandoval73ff61d2015-06-19 11:52:51 -07002401 sblock->no_io_error_seen = 0;
2402
Scott Talbert46732722016-05-09 09:14:28 -04002403 bio_put(bio);
2404
Omar Sandoval73ff61d2015-06-19 11:52:51 -07002405 btrfs_queue_work(fs_info->scrub_workers, &sblock->work);
2406}
2407
2408static void scrub_missing_raid56_worker(struct btrfs_work *work)
2409{
2410 struct scrub_block *sblock = container_of(work, struct scrub_block, work);
2411 struct scrub_ctx *sctx = sblock->sctx;
Jeff Mahoney0b246af2016-06-22 18:54:23 -04002412 struct btrfs_fs_info *fs_info = sctx->fs_info;
Omar Sandoval73ff61d2015-06-19 11:52:51 -07002413 u64 logical;
2414 struct btrfs_device *dev;
2415
Omar Sandoval73ff61d2015-06-19 11:52:51 -07002416 logical = sblock->pagev[0]->logical;
2417 dev = sblock->pagev[0]->dev;
2418
Zhao Leiaffe4a52015-08-24 21:32:06 +08002419 if (sblock->no_io_error_seen)
Zhao Leiba7cf982015-08-24 21:18:02 +08002420 scrub_recheck_block_checksum(sblock);
Omar Sandoval73ff61d2015-06-19 11:52:51 -07002421
2422 if (!sblock->no_io_error_seen) {
2423 spin_lock(&sctx->stat_lock);
2424 sctx->stat.read_errors++;
2425 spin_unlock(&sctx->stat_lock);
Jeff Mahoney0b246af2016-06-22 18:54:23 -04002426 btrfs_err_rl_in_rcu(fs_info,
David Sterbab14af3b2015-10-08 10:43:10 +02002427 "IO error rebuilding logical %llu for dev %s",
Omar Sandoval73ff61d2015-06-19 11:52:51 -07002428 logical, rcu_str_deref(dev->name));
2429 } else if (sblock->header_error || sblock->checksum_error) {
2430 spin_lock(&sctx->stat_lock);
2431 sctx->stat.uncorrectable_errors++;
2432 spin_unlock(&sctx->stat_lock);
Jeff Mahoney0b246af2016-06-22 18:54:23 -04002433 btrfs_err_rl_in_rcu(fs_info,
David Sterbab14af3b2015-10-08 10:43:10 +02002434 "failed to rebuild valid logical %llu for dev %s",
Omar Sandoval73ff61d2015-06-19 11:52:51 -07002435 logical, rcu_str_deref(dev->name));
2436 } else {
2437 scrub_write_block_to_dev_replace(sblock);
2438 }
2439
2440 scrub_block_put(sblock);
2441
David Sterba2073c4c2017-03-31 17:12:51 +02002442 if (sctx->is_dev_replace && sctx->flush_all_writes) {
David Sterba3fb99302017-05-16 19:10:32 +02002443 mutex_lock(&sctx->wr_lock);
Omar Sandoval73ff61d2015-06-19 11:52:51 -07002444 scrub_wr_submit(sctx);
David Sterba3fb99302017-05-16 19:10:32 +02002445 mutex_unlock(&sctx->wr_lock);
Omar Sandoval73ff61d2015-06-19 11:52:51 -07002446 }
2447
2448 scrub_pending_bio_dec(sctx);
2449}
2450
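/*
 * Scrub a block that sits on a missing device of a RAID 5/6 profile by
 * asking the raid56 layer to rebuild it from the remaining stripes; the
 * result is verified asynchronously in scrub_missing_raid56_worker().
 */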
2451static void scrub_missing_raid56_pages(struct scrub_block *sblock)
2452{
2453 struct scrub_ctx *sctx = sblock->sctx;
Jeff Mahoneyfb456252016-06-22 18:54:56 -04002454 struct btrfs_fs_info *fs_info = sctx->fs_info;
Omar Sandoval73ff61d2015-06-19 11:52:51 -07002455 u64 length = sblock->page_count * PAGE_SIZE;
2456 u64 logical = sblock->pagev[0]->logical;
Zhao Leif1fee652016-05-17 17:37:38 +08002457 struct btrfs_bio *bbio = NULL;
Omar Sandoval73ff61d2015-06-19 11:52:51 -07002458 struct bio *bio;
2459 struct btrfs_raid_bio *rbio;
2460 int ret;
2461 int i;
2462
Qu Wenruoae6529c2017-03-29 09:33:21 +08002463 btrfs_bio_counter_inc_blocked(fs_info);
Christoph Hellwigcf8cddd2016-10-27 09:27:36 +02002464 ret = btrfs_map_sblock(fs_info, BTRFS_MAP_GET_READ_MIRRORS, logical,
David Sterba825ad4c2017-03-28 14:45:22 +02002465 &length, &bbio);
Omar Sandoval73ff61d2015-06-19 11:52:51 -07002466 if (ret || !bbio || !bbio->raid_map)
2467 goto bbio_out;
2468
2469 if (WARN_ON(!sctx->is_dev_replace ||
2470 !(bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK))) {
2471 /*
2472 * We shouldn't be scrubbing a missing device. Even for dev
2473 * replace, we should only get here for RAID 5/6. We either
2474 * managed to mount something with no mirrors remaining or
2475 * there's a bug in scrub_remap_extent()/btrfs_map_block().
2476 */
2477 goto bbio_out;
2478 }
2479
David Sterbac5e4c3d2017-06-12 17:29:41 +02002480 bio = btrfs_io_bio_alloc(0);
Omar Sandoval73ff61d2015-06-19 11:52:51 -07002481 bio->bi_iter.bi_sector = logical >> 9;
2482 bio->bi_private = sblock;
2483 bio->bi_end_io = scrub_missing_raid56_end_io;
2484
Jeff Mahoney2ff7e612016-06-22 18:54:24 -04002485 rbio = raid56_alloc_missing_rbio(fs_info, bio, bbio, length);
Omar Sandoval73ff61d2015-06-19 11:52:51 -07002486 if (!rbio)
2487 goto rbio_out;
2488
2489 for (i = 0; i < sblock->page_count; i++) {
2490 struct scrub_page *spage = sblock->pagev[i];
2491
2492 raid56_add_scrub_pages(rbio, spage->page, spage->logical);
2493 }
2494
2495 btrfs_init_work(&sblock->work, btrfs_scrub_helper,
2496 scrub_missing_raid56_worker, NULL, NULL);
2497 scrub_block_get(sblock);
2498 scrub_pending_bio_inc(sctx);
2499 raid56_submit_missing_rbio(rbio);
2500 return;
2501
2502rbio_out:
2503 bio_put(bio);
2504bbio_out:
Qu Wenruoae6529c2017-03-29 09:33:21 +08002505 btrfs_bio_counter_dec(fs_info);
Omar Sandoval73ff61d2015-06-19 11:52:51 -07002506 btrfs_put_bbio(bbio);
2507 spin_lock(&sctx->stat_lock);
2508 sctx->stat.malloc_errors++;
2509 spin_unlock(&sctx->stat_lock);
2510}
2511
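/*
 * Build a scrub_block for [logical, logical + len), split into PAGE_SIZE
 * pages, and queue every page for reading; with @force set, the read bio
 * is submitted immediately instead of waiting until it is full. Blocks on
 * a missing device are handed to scrub_missing_raid56_pages() instead.
 */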
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002512static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01002513 u64 physical, struct btrfs_device *dev, u64 flags,
Stefan Behrensff023aa2012-11-06 11:43:11 +01002514 u64 gen, int mirror_num, u8 *csum, int force,
2515 u64 physical_for_dev_replace)
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002516{
2517 struct scrub_block *sblock;
2518 int index;
2519
David Sterba58c4e172016-02-11 10:49:42 +01002520 sblock = kzalloc(sizeof(*sblock), GFP_KERNEL);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002521 if (!sblock) {
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002522 spin_lock(&sctx->stat_lock);
2523 sctx->stat.malloc_errors++;
2524 spin_unlock(&sctx->stat_lock);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002525 return -ENOMEM;
2526 }
2527
Stefan Behrens7a9e9982012-11-02 14:58:04 +01002528 /* one ref inside this function, plus one for each page added to
2529 * a bio later on */
Elena Reshetova186debd2017-03-03 10:55:23 +02002530 refcount_set(&sblock->refs, 1);
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002531 sblock->sctx = sctx;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002532 sblock->no_io_error_seen = 1;
2533
2534 for (index = 0; len > 0; index++) {
Stefan Behrens7a9e9982012-11-02 14:58:04 +01002535 struct scrub_page *spage;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002536 u64 l = min_t(u64, len, PAGE_SIZE);
2537
David Sterba58c4e172016-02-11 10:49:42 +01002538 spage = kzalloc(sizeof(*spage), GFP_KERNEL);
Stefan Behrens7a9e9982012-11-02 14:58:04 +01002539 if (!spage) {
2540leave_nomem:
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002541 spin_lock(&sctx->stat_lock);
2542 sctx->stat.malloc_errors++;
2543 spin_unlock(&sctx->stat_lock);
Stefan Behrens7a9e9982012-11-02 14:58:04 +01002544 scrub_block_put(sblock);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002545 return -ENOMEM;
2546 }
Stefan Behrens7a9e9982012-11-02 14:58:04 +01002547 BUG_ON(index >= SCRUB_MAX_PAGES_PER_BLOCK);
2548 scrub_page_get(spage);
2549 sblock->pagev[index] = spage;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002550 spage->sblock = sblock;
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01002551 spage->dev = dev;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002552 spage->flags = flags;
2553 spage->generation = gen;
2554 spage->logical = logical;
2555 spage->physical = physical;
Stefan Behrensff023aa2012-11-06 11:43:11 +01002556 spage->physical_for_dev_replace = physical_for_dev_replace;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002557 spage->mirror_num = mirror_num;
2558 if (csum) {
2559 spage->have_csum = 1;
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002560 memcpy(spage->csum, csum, sctx->csum_size);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002561 } else {
2562 spage->have_csum = 0;
2563 }
2564 sblock->page_count++;
David Sterba58c4e172016-02-11 10:49:42 +01002565 spage->page = alloc_page(GFP_KERNEL);
Stefan Behrens7a9e9982012-11-02 14:58:04 +01002566 if (!spage->page)
2567 goto leave_nomem;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002568 len -= l;
2569 logical += l;
2570 physical += l;
Stefan Behrensff023aa2012-11-06 11:43:11 +01002571 physical_for_dev_replace += l;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002572 }
2573
Stefan Behrens7a9e9982012-11-02 14:58:04 +01002574 WARN_ON(sblock->page_count == 0);
Anand Jaine6e674b2017-12-04 12:54:54 +08002575 if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state)) {
Omar Sandoval73ff61d2015-06-19 11:52:51 -07002576 /*
2577 * This case should only be hit for RAID 5/6 device replace. See
2578 * the comment in scrub_missing_raid56_pages() for details.
2579 */
2580 scrub_missing_raid56_pages(sblock);
2581 } else {
2582 for (index = 0; index < sblock->page_count; index++) {
2583 struct scrub_page *spage = sblock->pagev[index];
2584 int ret;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002585
Omar Sandoval73ff61d2015-06-19 11:52:51 -07002586 ret = scrub_add_page_to_rd_bio(sctx, spage);
2587 if (ret) {
2588 scrub_block_put(sblock);
2589 return ret;
2590 }
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002591 }
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002592
Omar Sandoval73ff61d2015-06-19 11:52:51 -07002593 if (force)
2594 scrub_submit(sctx);
2595 }
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002596
2597 /* last one frees, either here or in bio completion for last page */
2598 scrub_block_put(sblock);
2599 return 0;
2600}
2601
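/* read bio completion; the actual work is deferred to scrub_bio_end_io_worker() */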
Christoph Hellwig4246a0b2015-07-20 15:29:37 +02002602static void scrub_bio_end_io(struct bio *bio)
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002603{
2604 struct scrub_bio *sbio = bio->bi_private;
Jeff Mahoneyfb456252016-06-22 18:54:56 -04002605 struct btrfs_fs_info *fs_info = sbio->dev->fs_info;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002606
Christoph Hellwig4e4cbee2017-06-03 09:38:06 +02002607 sbio->status = bio->bi_status;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002608 sbio->bio = bio;
2609
Qu Wenruo0339ef22014-02-28 10:46:17 +08002610 btrfs_queue_work(fs_info->scrub_workers, &sbio->work);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002611}
2612
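/*
 * Deferred completion of a read bio: propagate an I/O error to all pages
 * of the bio, complete the scrub_blocks whose last outstanding page just
 * finished, recycle the scrub_bio slot and, if requested, flush pending
 * dev-replace writes.
 */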
2613static void scrub_bio_end_io_worker(struct btrfs_work *work)
2614{
2615 struct scrub_bio *sbio = container_of(work, struct scrub_bio, work);
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002616 struct scrub_ctx *sctx = sbio->sctx;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002617 int i;
2618
Stefan Behrensff023aa2012-11-06 11:43:11 +01002619 BUG_ON(sbio->page_count > SCRUB_PAGES_PER_RD_BIO);
Christoph Hellwig4e4cbee2017-06-03 09:38:06 +02002620 if (sbio->status) {
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002621 for (i = 0; i < sbio->page_count; i++) {
2622 struct scrub_page *spage = sbio->pagev[i];
2623
2624 spage->io_error = 1;
2625 spage->sblock->no_io_error_seen = 0;
2626 }
2627 }
2628
2629 /* now complete the scrub_block items that have all pages completed */
2630 for (i = 0; i < sbio->page_count; i++) {
2631 struct scrub_page *spage = sbio->pagev[i];
2632 struct scrub_block *sblock = spage->sblock;
2633
2634 if (atomic_dec_and_test(&sblock->outstanding_pages))
2635 scrub_block_complete(sblock);
2636 scrub_block_put(sblock);
2637 }
2638
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002639 bio_put(sbio->bio);
2640 sbio->bio = NULL;
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002641 spin_lock(&sctx->list_lock);
2642 sbio->next_free = sctx->first_free;
2643 sctx->first_free = sbio->index;
2644 spin_unlock(&sctx->list_lock);
Stefan Behrensff023aa2012-11-06 11:43:11 +01002645
David Sterba2073c4c2017-03-31 17:12:51 +02002646 if (sctx->is_dev_replace && sctx->flush_all_writes) {
David Sterba3fb99302017-05-16 19:10:32 +02002647 mutex_lock(&sctx->wr_lock);
Stefan Behrensff023aa2012-11-06 11:43:11 +01002648 scrub_wr_submit(sctx);
David Sterba3fb99302017-05-16 19:10:32 +02002649 mutex_unlock(&sctx->wr_lock);
Stefan Behrensff023aa2012-11-06 11:43:11 +01002650 }
2651
Stefan Behrensb6bfebc2012-11-02 16:44:58 +01002652 scrub_pending_bio_dec(sctx);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002653}
2654
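/*
 * Mark the sectors of [start, start + len) in a per-stripe bitmap. The
 * offset is taken modulo the stripe length, so a range crossing the end
 * of the stripe wraps around to bit 0, and a range covering the whole
 * stripe sets every bit.
 */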
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002655static inline void __scrub_mark_bitmap(struct scrub_parity *sparity,
2656 unsigned long *bitmap,
2657 u64 start, u64 len)
2658{
Liu Bo972d7212017-04-03 13:45:33 -07002659 u64 offset;
David Sterba7736b0a2017-03-31 18:02:48 +02002660 u64 nsectors64;
2661 u32 nsectors;
Jeff Mahoneyda170662016-06-15 09:22:56 -04002662 int sectorsize = sparity->sctx->fs_info->sectorsize;
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002663
2664 if (len >= sparity->stripe_len) {
2665 bitmap_set(bitmap, 0, sparity->nsectors);
2666 return;
2667 }
2668
2669 start -= sparity->logic_start;
Liu Bo972d7212017-04-03 13:45:33 -07002670 start = div64_u64_rem(start, sparity->stripe_len, &offset);
2671 offset = div_u64(offset, sectorsize);
David Sterba7736b0a2017-03-31 18:02:48 +02002672 nsectors64 = div_u64(len, sectorsize);
2673
2674 ASSERT(nsectors64 < UINT_MAX);
2675 nsectors = (u32)nsectors64;
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002676
2677 if (offset + nsectors <= sparity->nsectors) {
2678 bitmap_set(bitmap, offset, nsectors);
2679 return;
2680 }
2681
2682 bitmap_set(bitmap, offset, sparity->nsectors - offset);
2683 bitmap_set(bitmap, 0, nsectors - (sparity->nsectors - offset));
2684}
2685
2686static inline void scrub_parity_mark_sectors_error(struct scrub_parity *sparity,
2687 u64 start, u64 len)
2688{
2689 __scrub_mark_bitmap(sparity, sparity->ebitmap, start, len);
2690}
2691
2692static inline void scrub_parity_mark_sectors_data(struct scrub_parity *sparity,
2693 u64 start, u64 len)
2694{
2695 __scrub_mark_bitmap(sparity, sparity->dbitmap, start, len);
2696}
2697
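/*
 * Called once all pages of a block have completed: kick off repair for
 * I/O errors, otherwise verify the checksums, and for RAID 5/6 parity
 * scrubs record corrupted sectors in the parity error bitmap.
 */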
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002698static void scrub_block_complete(struct scrub_block *sblock)
2699{
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002700 int corrupted = 0;
2701
Stefan Behrensff023aa2012-11-06 11:43:11 +01002702 if (!sblock->no_io_error_seen) {
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002703 corrupted = 1;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002704 scrub_handle_errored_block(sblock);
Stefan Behrensff023aa2012-11-06 11:43:11 +01002705 } else {
2706 /*
2707		 * In the dev replace case, a block with a checksum error
2708		 * is written out via the repair machinery; a good block
2709		 * is written to the target device right here.
2710 */
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002711 corrupted = scrub_checksum(sblock);
2712 if (!corrupted && sblock->sctx->is_dev_replace)
Stefan Behrensff023aa2012-11-06 11:43:11 +01002713 scrub_write_block_to_dev_replace(sblock);
2714 }
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002715
2716 if (sblock->sparity && corrupted && !sblock->data_corrected) {
2717 u64 start = sblock->pagev[0]->logical;
2718 u64 end = sblock->pagev[sblock->page_count - 1]->logical +
2719 PAGE_SIZE;
2720
2721 scrub_parity_mark_sectors_error(sblock->sparity,
2722 start, end - start);
2723 }
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002724}
2725
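/*
 * Look up the data checksum for @logical in sctx->csum_list, discarding
 * entries that end before it. Returns 1 and copies the checksum into
 * @csum on success, 0 if no checksum is known for this offset.
 */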
Zhao Lei3b5753e2015-08-24 22:03:02 +08002726static int scrub_find_csum(struct scrub_ctx *sctx, u64 logical, u8 *csum)
Arne Jansena2de7332011-03-08 14:14:00 +01002727{
2728 struct btrfs_ordered_sum *sum = NULL;
Miao Xief51a4a12013-06-19 10:36:09 +08002729 unsigned long index;
Arne Jansena2de7332011-03-08 14:14:00 +01002730 unsigned long num_sectors;
Arne Jansena2de7332011-03-08 14:14:00 +01002731
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002732 while (!list_empty(&sctx->csum_list)) {
2733 sum = list_first_entry(&sctx->csum_list,
Arne Jansena2de7332011-03-08 14:14:00 +01002734 struct btrfs_ordered_sum, list);
2735 if (sum->bytenr > logical)
2736 return 0;
2737 if (sum->bytenr + sum->len > logical)
2738 break;
2739
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002740 ++sctx->stat.csum_discards;
Arne Jansena2de7332011-03-08 14:14:00 +01002741 list_del(&sum->list);
2742 kfree(sum);
2743 sum = NULL;
2744 }
2745 if (!sum)
2746 return 0;
2747
David Sterba1d1bf922017-03-31 18:02:48 +02002748 index = div_u64(logical - sum->bytenr, sctx->fs_info->sectorsize);
2749 ASSERT(index < UINT_MAX);
2750
David Sterba25cc1222017-05-16 19:10:41 +02002751 num_sectors = sum->len / sctx->fs_info->sectorsize;
Miao Xief51a4a12013-06-19 10:36:09 +08002752 memcpy(csum, sum->sums + index, sctx->csum_size);
2753 if (index == num_sectors - 1) {
Arne Jansena2de7332011-03-08 14:14:00 +01002754 list_del(&sum->list);
2755 kfree(sum);
2756 }
Miao Xief51a4a12013-06-19 10:36:09 +08002757 return 1;
Arne Jansena2de7332011-03-08 14:14:00 +01002758}
2759
2760/* scrub extent tries to collect up to 64 kB for each bio */
Liu Bo6ca17652018-03-07 12:08:09 -07002761static int scrub_extent(struct scrub_ctx *sctx, struct map_lookup *map,
2762 u64 logical, u64 len,
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01002763 u64 physical, struct btrfs_device *dev, u64 flags,
Stefan Behrensff023aa2012-11-06 11:43:11 +01002764 u64 gen, int mirror_num, u64 physical_for_dev_replace)
Arne Jansena2de7332011-03-08 14:14:00 +01002765{
2766 int ret;
2767 u8 csum[BTRFS_CSUM_SIZE];
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002768 u32 blocksize;
2769
2770 if (flags & BTRFS_EXTENT_FLAG_DATA) {
Liu Bo6ca17652018-03-07 12:08:09 -07002771 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
2772 blocksize = map->stripe_len;
2773 else
2774 blocksize = sctx->fs_info->sectorsize;
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002775 spin_lock(&sctx->stat_lock);
2776 sctx->stat.data_extents_scrubbed++;
2777 sctx->stat.data_bytes_scrubbed += len;
2778 spin_unlock(&sctx->stat_lock);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002779 } else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
Liu Bo6ca17652018-03-07 12:08:09 -07002780 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
2781 blocksize = map->stripe_len;
2782 else
2783 blocksize = sctx->fs_info->nodesize;
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002784 spin_lock(&sctx->stat_lock);
2785 sctx->stat.tree_extents_scrubbed++;
2786 sctx->stat.tree_bytes_scrubbed += len;
2787 spin_unlock(&sctx->stat_lock);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002788 } else {
David Sterba25cc1222017-05-16 19:10:41 +02002789 blocksize = sctx->fs_info->sectorsize;
Stefan Behrensff023aa2012-11-06 11:43:11 +01002790 WARN_ON(1);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002791 }
Arne Jansena2de7332011-03-08 14:14:00 +01002792
2793 while (len) {
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002794 u64 l = min_t(u64, len, blocksize);
Arne Jansena2de7332011-03-08 14:14:00 +01002795 int have_csum = 0;
2796
2797 if (flags & BTRFS_EXTENT_FLAG_DATA) {
2798 /* push csums to sbio */
Zhao Lei3b5753e2015-08-24 22:03:02 +08002799 have_csum = scrub_find_csum(sctx, logical, csum);
Arne Jansena2de7332011-03-08 14:14:00 +01002800 if (have_csum == 0)
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002801 ++sctx->stat.no_csum;
Stefan Behrensff023aa2012-11-06 11:43:11 +01002802 if (sctx->is_dev_replace && !have_csum) {
2803 ret = copy_nocow_pages(sctx, logical, l,
2804 mirror_num,
2805 physical_for_dev_replace);
2806 goto behind_scrub_pages;
2807 }
Arne Jansena2de7332011-03-08 14:14:00 +01002808 }
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01002809 ret = scrub_pages(sctx, logical, l, physical, dev, flags, gen,
Stefan Behrensff023aa2012-11-06 11:43:11 +01002810 mirror_num, have_csum ? csum : NULL, 0,
2811 physical_for_dev_replace);
2812behind_scrub_pages:
Arne Jansena2de7332011-03-08 14:14:00 +01002813 if (ret)
2814 return ret;
2815 len -= l;
2816 logical += l;
2817 physical += l;
Stefan Behrensff023aa2012-11-06 11:43:11 +01002818 physical_for_dev_replace += l;
Arne Jansena2de7332011-03-08 14:14:00 +01002819 }
2820 return 0;
2821}
2822
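/*
 * Counterpart of scrub_pages() for parity scrubbing: every page is
 * referenced both from the scrub_block and from the scrub_parity's page
 * list, so that bad sectors can later be reported against the stripe.
 */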
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002823static int scrub_pages_for_parity(struct scrub_parity *sparity,
2824 u64 logical, u64 len,
2825 u64 physical, struct btrfs_device *dev,
2826 u64 flags, u64 gen, int mirror_num, u8 *csum)
2827{
2828 struct scrub_ctx *sctx = sparity->sctx;
2829 struct scrub_block *sblock;
2830 int index;
2831
David Sterba58c4e172016-02-11 10:49:42 +01002832 sblock = kzalloc(sizeof(*sblock), GFP_KERNEL);
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002833 if (!sblock) {
2834 spin_lock(&sctx->stat_lock);
2835 sctx->stat.malloc_errors++;
2836 spin_unlock(&sctx->stat_lock);
2837 return -ENOMEM;
2838 }
2839
2840 /* one ref inside this function, plus one for each page added to
2841 * a bio later on */
Elena Reshetova186debd2017-03-03 10:55:23 +02002842 refcount_set(&sblock->refs, 1);
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002843 sblock->sctx = sctx;
2844 sblock->no_io_error_seen = 1;
2845 sblock->sparity = sparity;
2846 scrub_parity_get(sparity);
2847
2848 for (index = 0; len > 0; index++) {
2849 struct scrub_page *spage;
2850 u64 l = min_t(u64, len, PAGE_SIZE);
2851
David Sterba58c4e172016-02-11 10:49:42 +01002852 spage = kzalloc(sizeof(*spage), GFP_KERNEL);
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002853 if (!spage) {
2854leave_nomem:
2855 spin_lock(&sctx->stat_lock);
2856 sctx->stat.malloc_errors++;
2857 spin_unlock(&sctx->stat_lock);
2858 scrub_block_put(sblock);
2859 return -ENOMEM;
2860 }
2861 BUG_ON(index >= SCRUB_MAX_PAGES_PER_BLOCK);
2862 /* For scrub block */
2863 scrub_page_get(spage);
2864 sblock->pagev[index] = spage;
2865 /* For scrub parity */
2866 scrub_page_get(spage);
2867 list_add_tail(&spage->list, &sparity->spages);
2868 spage->sblock = sblock;
2869 spage->dev = dev;
2870 spage->flags = flags;
2871 spage->generation = gen;
2872 spage->logical = logical;
2873 spage->physical = physical;
2874 spage->mirror_num = mirror_num;
2875 if (csum) {
2876 spage->have_csum = 1;
2877 memcpy(spage->csum, csum, sctx->csum_size);
2878 } else {
2879 spage->have_csum = 0;
2880 }
2881 sblock->page_count++;
David Sterba58c4e172016-02-11 10:49:42 +01002882 spage->page = alloc_page(GFP_KERNEL);
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002883 if (!spage->page)
2884 goto leave_nomem;
2885 len -= l;
2886 logical += l;
2887 physical += l;
2888 }
2889
2890 WARN_ON(sblock->page_count == 0);
2891 for (index = 0; index < sblock->page_count; index++) {
2892 struct scrub_page *spage = sblock->pagev[index];
2893 int ret;
2894
2895 ret = scrub_add_page_to_rd_bio(sctx, spage);
2896 if (ret) {
2897 scrub_block_put(sblock);
2898 return ret;
2899 }
2900 }
2901
2902 /* last one frees, either here or in bio completion for last page */
2903 scrub_block_put(sblock);
2904 return 0;
2905}
2906
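/*
 * Like scrub_extent(), but for extents inside a RAID 5/6 parity stripe:
 * sectors on a missing device are simply marked as errors, and data
 * sectors without a known checksum are skipped.
 */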
2907static int scrub_extent_for_parity(struct scrub_parity *sparity,
2908 u64 logical, u64 len,
2909 u64 physical, struct btrfs_device *dev,
2910 u64 flags, u64 gen, int mirror_num)
2911{
2912 struct scrub_ctx *sctx = sparity->sctx;
2913 int ret;
2914 u8 csum[BTRFS_CSUM_SIZE];
2915 u32 blocksize;
2916
Anand Jaine6e674b2017-12-04 12:54:54 +08002917 if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state)) {
Omar Sandoval4a770892015-06-19 11:52:52 -07002918 scrub_parity_mark_sectors_error(sparity, logical, len);
2919 return 0;
2920 }
2921
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002922 if (flags & BTRFS_EXTENT_FLAG_DATA) {
Liu Bo6ca17652018-03-07 12:08:09 -07002923 blocksize = sparity->stripe_len;
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002924 } else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
Liu Bo6ca17652018-03-07 12:08:09 -07002925 blocksize = sparity->stripe_len;
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002926 } else {
David Sterba25cc1222017-05-16 19:10:41 +02002927 blocksize = sctx->fs_info->sectorsize;
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002928 WARN_ON(1);
2929 }
2930
2931 while (len) {
2932 u64 l = min_t(u64, len, blocksize);
2933 int have_csum = 0;
2934
2935 if (flags & BTRFS_EXTENT_FLAG_DATA) {
2936 /* push csums to sbio */
Zhao Lei3b5753e2015-08-24 22:03:02 +08002937 have_csum = scrub_find_csum(sctx, logical, csum);
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002938 if (have_csum == 0)
2939 goto skip;
2940 }
2941 ret = scrub_pages_for_parity(sparity, logical, l, physical, dev,
2942 flags, gen, mirror_num,
2943 have_csum ? csum : NULL);
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002944 if (ret)
2945 return ret;
Dan Carpenter6b6d24b2014-12-12 22:30:00 +03002946skip:
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002947 len -= l;
2948 logical += l;
2949 physical += l;
2950 }
2951 return 0;
2952}
2953
Wang Shilong3b080b22014-04-01 18:01:43 +08002954/*
2955 * Given a physical address, this will calculate its
2956 * logical offset. If this is a parity stripe, it will return
2957 * the left-most data stripe's logical offset.
2958 *
2959 * Returns 0 if it is a data stripe, 1 if it is a parity stripe.
2960 */
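/*
 * Illustrative example (assumed values, not from the code): on RAID5 with
 * 3 devices and a 64K stripe_len there are 2 data stripes per full stripe.
 * A @physical offset of 128K into stripe @num then yields last_offset =
 * 256K worth of data, and the loop below walks the candidate data stripes,
 * skipping the rotations in which stripe @num holds the parity.
 */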
2961static int get_raid56_logic_offset(u64 physical, int num,
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002962 struct map_lookup *map, u64 *offset,
2963 u64 *stripe_start)
Wang Shilong3b080b22014-04-01 18:01:43 +08002964{
2965 int i;
2966 int j = 0;
2967 u64 stripe_nr;
2968 u64 last_offset;
David Sterba9d644a62015-02-20 18:42:11 +01002969 u32 stripe_index;
2970 u32 rot;
Wang Shilong3b080b22014-04-01 18:01:43 +08002971
2972 last_offset = (physical - map->stripes[num].physical) *
2973 nr_data_stripes(map);
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002974 if (stripe_start)
2975 *stripe_start = last_offset;
2976
Wang Shilong3b080b22014-04-01 18:01:43 +08002977 *offset = last_offset;
2978 for (i = 0; i < nr_data_stripes(map); i++) {
2979 *offset = last_offset + i * map->stripe_len;
2980
Liu Bo42c61ab2017-04-03 13:45:24 -07002981 stripe_nr = div64_u64(*offset, map->stripe_len);
David Sterbab8b93ad2015-01-16 17:26:13 +01002982 stripe_nr = div_u64(stripe_nr, nr_data_stripes(map));
Wang Shilong3b080b22014-04-01 18:01:43 +08002983
2984 /* Work out the disk rotation on this stripe-set */
David Sterba47c57132015-02-20 18:43:47 +01002985 stripe_nr = div_u64_rem(stripe_nr, map->num_stripes, &rot);
Wang Shilong3b080b22014-04-01 18:01:43 +08002986		/* calculate which stripe this data is located on */
2987 rot += i;
Wang Shilonge4fbaee2014-04-11 18:32:25 +08002988 stripe_index = rot % map->num_stripes;
Wang Shilong3b080b22014-04-01 18:01:43 +08002989 if (stripe_index == num)
2990 return 0;
2991 if (stripe_index < num)
2992 j++;
2993 }
2994 *offset = last_offset + j * map->stripe_len;
2995 return 1;
2996}
2997
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002998static void scrub_free_parity(struct scrub_parity *sparity)
2999{
3000 struct scrub_ctx *sctx = sparity->sctx;
3001 struct scrub_page *curr, *next;
3002 int nbits;
3003
3004 nbits = bitmap_weight(sparity->ebitmap, sparity->nsectors);
3005 if (nbits) {
3006 spin_lock(&sctx->stat_lock);
3007 sctx->stat.read_errors += nbits;
3008 sctx->stat.uncorrectable_errors += nbits;
3009 spin_unlock(&sctx->stat_lock);
3010 }
3011
3012 list_for_each_entry_safe(curr, next, &sparity->spages, list) {
3013 list_del_init(&curr->list);
3014 scrub_page_put(curr);
3015 }
3016
3017 kfree(sparity);
3018}
3019
Zhao Lei20b2e302015-06-04 20:09:15 +08003020static void scrub_parity_bio_endio_worker(struct btrfs_work *work)
3021{
3022 struct scrub_parity *sparity = container_of(work, struct scrub_parity,
3023 work);
3024 struct scrub_ctx *sctx = sparity->sctx;
3025
3026 scrub_free_parity(sparity);
3027 scrub_pending_bio_dec(sctx);
3028}
3029
Christoph Hellwig4246a0b2015-07-20 15:29:37 +02003030static void scrub_parity_bio_endio(struct bio *bio)
Miao Xie5a6ac9e2014-11-06 17:20:58 +08003031{
3032 struct scrub_parity *sparity = (struct scrub_parity *)bio->bi_private;
Jeff Mahoney0b246af2016-06-22 18:54:23 -04003033 struct btrfs_fs_info *fs_info = sparity->sctx->fs_info;
Miao Xie5a6ac9e2014-11-06 17:20:58 +08003034
Christoph Hellwig4e4cbee2017-06-03 09:38:06 +02003035 if (bio->bi_status)
Miao Xie5a6ac9e2014-11-06 17:20:58 +08003036 bitmap_or(sparity->ebitmap, sparity->ebitmap, sparity->dbitmap,
3037 sparity->nsectors);
3038
Miao Xie5a6ac9e2014-11-06 17:20:58 +08003039 bio_put(bio);
Zhao Lei20b2e302015-06-04 20:09:15 +08003040
3041 btrfs_init_work(&sparity->work, btrfs_scrubparity_helper,
3042 scrub_parity_bio_endio_worker, NULL, NULL);
Jeff Mahoney0b246af2016-06-22 18:54:23 -04003043 btrfs_queue_work(fs_info->scrub_parity_workers, &sparity->work);
Miao Xie5a6ac9e2014-11-06 17:20:58 +08003044}
3045
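/*
 * Invoked when the last reference to a scrub_parity is dropped: clear the
 * sectors that already failed from the data bitmap and, if any data
 * sectors remain, have the raid56 layer recompute and verify the parity
 * of the full stripe.
 */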
3046static void scrub_parity_check_and_repair(struct scrub_parity *sparity)
3047{
3048 struct scrub_ctx *sctx = sparity->sctx;
Jeff Mahoney0b246af2016-06-22 18:54:23 -04003049 struct btrfs_fs_info *fs_info = sctx->fs_info;
Miao Xie5a6ac9e2014-11-06 17:20:58 +08003050 struct bio *bio;
3051 struct btrfs_raid_bio *rbio;
Miao Xie5a6ac9e2014-11-06 17:20:58 +08003052 struct btrfs_bio *bbio = NULL;
Miao Xie5a6ac9e2014-11-06 17:20:58 +08003053 u64 length;
3054 int ret;
3055
3056 if (!bitmap_andnot(sparity->dbitmap, sparity->dbitmap, sparity->ebitmap,
3057 sparity->nsectors))
3058 goto out;
3059
Zhao Leia0dd59d2015-07-21 15:42:26 +08003060 length = sparity->logic_end - sparity->logic_start;
Qu Wenruoae6529c2017-03-29 09:33:21 +08003061
3062 btrfs_bio_counter_inc_blocked(fs_info);
Jeff Mahoney0b246af2016-06-22 18:54:23 -04003063 ret = btrfs_map_sblock(fs_info, BTRFS_MAP_WRITE, sparity->logic_start,
David Sterba825ad4c2017-03-28 14:45:22 +02003064 &length, &bbio);
Zhao Lei8e5cfb52015-01-20 15:11:33 +08003065 if (ret || !bbio || !bbio->raid_map)
Miao Xie5a6ac9e2014-11-06 17:20:58 +08003066 goto bbio_out;
3067
David Sterbac5e4c3d2017-06-12 17:29:41 +02003068 bio = btrfs_io_bio_alloc(0);
Miao Xie5a6ac9e2014-11-06 17:20:58 +08003069 bio->bi_iter.bi_sector = sparity->logic_start >> 9;
3070 bio->bi_private = sparity;
3071 bio->bi_end_io = scrub_parity_bio_endio;
3072
Jeff Mahoney2ff7e612016-06-22 18:54:24 -04003073 rbio = raid56_parity_alloc_scrub_rbio(fs_info, bio, bbio,
Zhao Lei8e5cfb52015-01-20 15:11:33 +08003074 length, sparity->scrub_dev,
Miao Xie5a6ac9e2014-11-06 17:20:58 +08003075 sparity->dbitmap,
3076 sparity->nsectors);
3077 if (!rbio)
3078 goto rbio_out;
3079
Miao Xie5a6ac9e2014-11-06 17:20:58 +08003080 scrub_pending_bio_inc(sctx);
3081 raid56_parity_submit_scrub_rbio(rbio);
3082 return;
3083
3084rbio_out:
3085 bio_put(bio);
3086bbio_out:
Qu Wenruoae6529c2017-03-29 09:33:21 +08003087 btrfs_bio_counter_dec(fs_info);
Zhao Lei6e9606d2015-01-20 15:11:34 +08003088 btrfs_put_bbio(bbio);
Miao Xie5a6ac9e2014-11-06 17:20:58 +08003089 bitmap_or(sparity->ebitmap, sparity->ebitmap, sparity->dbitmap,
3090 sparity->nsectors);
3091 spin_lock(&sctx->stat_lock);
3092 sctx->stat.malloc_errors++;
3093 spin_unlock(&sctx->stat_lock);
3094out:
3095 scrub_free_parity(sparity);
3096}
3097
3098static inline int scrub_calc_parity_bitmap_len(int nsectors)
3099{
Zhao Leibfca9a62014-12-08 19:55:57 +08003100 return DIV_ROUND_UP(nsectors, BITS_PER_LONG) * sizeof(long);
Miao Xie5a6ac9e2014-11-06 17:20:58 +08003101}
3102
3103static void scrub_parity_get(struct scrub_parity *sparity)
3104{
Elena Reshetova78a76452017-03-03 10:55:24 +02003105 refcount_inc(&sparity->refs);
Miao Xie5a6ac9e2014-11-06 17:20:58 +08003106}
3107
3108static void scrub_parity_put(struct scrub_parity *sparity)
3109{
Elena Reshetova78a76452017-03-03 10:55:24 +02003110 if (!refcount_dec_and_test(&sparity->refs))
Miao Xie5a6ac9e2014-11-06 17:20:58 +08003111 return;
3112
3113 scrub_parity_check_and_repair(sparity);
3114}
3115
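/*
 * Scrub the parity of one full stripe in [logic_start, logic_end): walk
 * the extent tree to mark which sectors hold data, scrub those sectors,
 * and let scrub_parity_put() trigger the parity check once the last
 * reference is dropped.
 */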
3116static noinline_for_stack int scrub_raid56_parity(struct scrub_ctx *sctx,
3117 struct map_lookup *map,
3118 struct btrfs_device *sdev,
3119 struct btrfs_path *path,
3120 u64 logic_start,
3121 u64 logic_end)
3122{
Jeff Mahoneyfb456252016-06-22 18:54:56 -04003123 struct btrfs_fs_info *fs_info = sctx->fs_info;
Miao Xie5a6ac9e2014-11-06 17:20:58 +08003124 struct btrfs_root *root = fs_info->extent_root;
3125 struct btrfs_root *csum_root = fs_info->csum_root;
3126 struct btrfs_extent_item *extent;
Omar Sandoval4a770892015-06-19 11:52:52 -07003127 struct btrfs_bio *bbio = NULL;
Miao Xie5a6ac9e2014-11-06 17:20:58 +08003128 u64 flags;
3129 int ret;
3130 int slot;
3131 struct extent_buffer *l;
3132 struct btrfs_key key;
3133 u64 generation;
3134 u64 extent_logical;
3135 u64 extent_physical;
3136 u64 extent_len;
Omar Sandoval4a770892015-06-19 11:52:52 -07003137 u64 mapped_length;
Miao Xie5a6ac9e2014-11-06 17:20:58 +08003138 struct btrfs_device *extent_dev;
3139 struct scrub_parity *sparity;
3140 int nsectors;
3141 int bitmap_len;
3142 int extent_mirror_num;
3143 int stop_loop = 0;
3144
Jeff Mahoney0b246af2016-06-22 18:54:23 -04003145 nsectors = div_u64(map->stripe_len, fs_info->sectorsize);
Miao Xie5a6ac9e2014-11-06 17:20:58 +08003146 bitmap_len = scrub_calc_parity_bitmap_len(nsectors);
3147 sparity = kzalloc(sizeof(struct scrub_parity) + 2 * bitmap_len,
3148 GFP_NOFS);
3149 if (!sparity) {
3150 spin_lock(&sctx->stat_lock);
3151 sctx->stat.malloc_errors++;
3152 spin_unlock(&sctx->stat_lock);
3153 return -ENOMEM;
3154 }
3155
3156 sparity->stripe_len = map->stripe_len;
3157 sparity->nsectors = nsectors;
3158 sparity->sctx = sctx;
3159 sparity->scrub_dev = sdev;
3160 sparity->logic_start = logic_start;
3161 sparity->logic_end = logic_end;
Elena Reshetova78a76452017-03-03 10:55:24 +02003162 refcount_set(&sparity->refs, 1);
Miao Xie5a6ac9e2014-11-06 17:20:58 +08003163 INIT_LIST_HEAD(&sparity->spages);
3164 sparity->dbitmap = sparity->bitmap;
3165 sparity->ebitmap = (void *)sparity->bitmap + bitmap_len;
3166
3167 ret = 0;
3168 while (logic_start < logic_end) {
3169 if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
3170 key.type = BTRFS_METADATA_ITEM_KEY;
3171 else
3172 key.type = BTRFS_EXTENT_ITEM_KEY;
3173 key.objectid = logic_start;
3174 key.offset = (u64)-1;
3175
3176 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3177 if (ret < 0)
3178 goto out;
3179
3180 if (ret > 0) {
3181 ret = btrfs_previous_extent_item(root, path, 0);
3182 if (ret < 0)
3183 goto out;
3184 if (ret > 0) {
3185 btrfs_release_path(path);
3186 ret = btrfs_search_slot(NULL, root, &key,
3187 path, 0, 0);
3188 if (ret < 0)
3189 goto out;
3190 }
3191 }
3192
3193 stop_loop = 0;
3194 while (1) {
3195 u64 bytes;
3196
3197 l = path->nodes[0];
3198 slot = path->slots[0];
3199 if (slot >= btrfs_header_nritems(l)) {
3200 ret = btrfs_next_leaf(root, path);
3201 if (ret == 0)
3202 continue;
3203 if (ret < 0)
3204 goto out;
3205
3206 stop_loop = 1;
3207 break;
3208 }
3209 btrfs_item_key_to_cpu(l, &key, slot);
3210
Zhao Leid7cad232015-07-22 13:14:48 +08003211 if (key.type != BTRFS_EXTENT_ITEM_KEY &&
3212 key.type != BTRFS_METADATA_ITEM_KEY)
3213 goto next;
3214
Miao Xie5a6ac9e2014-11-06 17:20:58 +08003215 if (key.type == BTRFS_METADATA_ITEM_KEY)
Jeff Mahoney0b246af2016-06-22 18:54:23 -04003216 bytes = fs_info->nodesize;
Miao Xie5a6ac9e2014-11-06 17:20:58 +08003217 else
3218 bytes = key.offset;
3219
3220 if (key.objectid + bytes <= logic_start)
3221 goto next;
3222
Zhao Leia0dd59d2015-07-21 15:42:26 +08003223 if (key.objectid >= logic_end) {
Miao Xie5a6ac9e2014-11-06 17:20:58 +08003224 stop_loop = 1;
3225 break;
3226 }
3227
3228 while (key.objectid >= logic_start + map->stripe_len)
3229 logic_start += map->stripe_len;
3230
3231 extent = btrfs_item_ptr(l, slot,
3232 struct btrfs_extent_item);
3233 flags = btrfs_extent_flags(l, extent);
3234 generation = btrfs_extent_generation(l, extent);
3235
Zhao Leia323e812015-07-23 12:29:49 +08003236 if ((flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) &&
3237 (key.objectid < logic_start ||
3238 key.objectid + bytes >
3239 logic_start + map->stripe_len)) {
Jeff Mahoney5d163e02016-09-20 10:05:00 -04003240 btrfs_err(fs_info,
3241 "scrub: tree block %llu spanning stripes, ignored. logical=%llu",
Zhao Leia323e812015-07-23 12:29:49 +08003242 key.objectid, logic_start);
Zhao Lei9799d2c32015-08-25 21:31:40 +08003243 spin_lock(&sctx->stat_lock);
3244 sctx->stat.uncorrectable_errors++;
3245 spin_unlock(&sctx->stat_lock);
Miao Xie5a6ac9e2014-11-06 17:20:58 +08003246 goto next;
3247 }
3248again:
3249 extent_logical = key.objectid;
3250 extent_len = bytes;
3251
3252 if (extent_logical < logic_start) {
3253 extent_len -= logic_start - extent_logical;
3254 extent_logical = logic_start;
3255 }
3256
3257 if (extent_logical + extent_len >
3258 logic_start + map->stripe_len)
3259 extent_len = logic_start + map->stripe_len -
3260 extent_logical;
3261
3262 scrub_parity_mark_sectors_data(sparity, extent_logical,
3263 extent_len);
3264
Omar Sandoval4a770892015-06-19 11:52:52 -07003265 mapped_length = extent_len;
Zhao Leif1fee652016-05-17 17:37:38 +08003266 bbio = NULL;
Christoph Hellwigcf8cddd2016-10-27 09:27:36 +02003267 ret = btrfs_map_block(fs_info, BTRFS_MAP_READ,
3268 extent_logical, &mapped_length, &bbio,
3269 0);
Omar Sandoval4a770892015-06-19 11:52:52 -07003270 if (!ret) {
3271 if (!bbio || mapped_length < extent_len)
3272 ret = -EIO;
3273 }
3274 if (ret) {
3275 btrfs_put_bbio(bbio);
3276 goto out;
3277 }
3278 extent_physical = bbio->stripes[0].physical;
3279 extent_mirror_num = bbio->mirror_num;
3280 extent_dev = bbio->stripes[0].dev;
3281 btrfs_put_bbio(bbio);
Miao Xie5a6ac9e2014-11-06 17:20:58 +08003282
3283 ret = btrfs_lookup_csums_range(csum_root,
3284 extent_logical,
3285 extent_logical + extent_len - 1,
3286 &sctx->csum_list, 1);
3287 if (ret)
3288 goto out;
3289
3290 ret = scrub_extent_for_parity(sparity, extent_logical,
3291 extent_len,
3292 extent_physical,
3293 extent_dev, flags,
3294 generation,
3295 extent_mirror_num);
Zhao Lei6fa96d72015-07-21 12:22:30 +08003296
3297 scrub_free_csums(sctx);
3298
Miao Xie5a6ac9e2014-11-06 17:20:58 +08003299 if (ret)
3300 goto out;
3301
Miao Xie5a6ac9e2014-11-06 17:20:58 +08003302 if (extent_logical + extent_len <
3303 key.objectid + bytes) {
3304 logic_start += map->stripe_len;
3305
3306 if (logic_start >= logic_end) {
3307 stop_loop = 1;
3308 break;
3309 }
3310
3311 if (logic_start < key.objectid + bytes) {
3312 cond_resched();
3313 goto again;
3314 }
3315 }
3316next:
3317 path->slots[0]++;
3318 }
3319
3320 btrfs_release_path(path);
3321
3322 if (stop_loop)
3323 break;
3324
3325 logic_start += map->stripe_len;
3326 }
3327out:
3328 if (ret < 0)
3329 scrub_parity_mark_sectors_error(sparity, logic_start,
Zhao Leia0dd59d2015-07-21 15:42:26 +08003330 logic_end - logic_start);
Miao Xie5a6ac9e2014-11-06 17:20:58 +08003331 scrub_parity_put(sparity);
3332 scrub_submit(sctx);
David Sterba3fb99302017-05-16 19:10:32 +02003333 mutex_lock(&sctx->wr_lock);
Miao Xie5a6ac9e2014-11-06 17:20:58 +08003334 scrub_wr_submit(sctx);
David Sterba3fb99302017-05-16 19:10:32 +02003335 mutex_unlock(&sctx->wr_lock);
Miao Xie5a6ac9e2014-11-06 17:20:58 +08003336
3337 btrfs_release_path(path);
3338 return ret < 0 ? ret : 0;
3339}
3340
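/*
 * Scrub all extents of one device stripe of a chunk. The offset into the
 * device extent and the logical increment per iteration depend on the
 * RAID profile; for RAID 5/6, parity stripes are handed off to
 * scrub_raid56_parity().
 */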
Stefan Behrensd9d181c2012-11-02 09:58:09 +01003341static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01003342 struct map_lookup *map,
3343 struct btrfs_device *scrub_dev,
Stefan Behrensff023aa2012-11-06 11:43:11 +01003344 int num, u64 base, u64 length,
3345 int is_dev_replace)
Arne Jansena2de7332011-03-08 14:14:00 +01003346{
Miao Xie5a6ac9e2014-11-06 17:20:58 +08003347 struct btrfs_path *path, *ppath;
Jeff Mahoneyfb456252016-06-22 18:54:56 -04003348 struct btrfs_fs_info *fs_info = sctx->fs_info;
Arne Jansena2de7332011-03-08 14:14:00 +01003349 struct btrfs_root *root = fs_info->extent_root;
3350 struct btrfs_root *csum_root = fs_info->csum_root;
3351 struct btrfs_extent_item *extent;
Arne Jansene7786c32011-05-28 20:58:38 +00003352 struct blk_plug plug;
Arne Jansena2de7332011-03-08 14:14:00 +01003353 u64 flags;
3354 int ret;
3355 int slot;
Arne Jansena2de7332011-03-08 14:14:00 +01003356 u64 nstripes;
Arne Jansena2de7332011-03-08 14:14:00 +01003357 struct extent_buffer *l;
Arne Jansena2de7332011-03-08 14:14:00 +01003358 u64 physical;
3359 u64 logical;
Liu Bo625f1c8d2013-04-27 02:56:57 +00003360 u64 logic_end;
Wang Shilong3b080b22014-04-01 18:01:43 +08003361 u64 physical_end;
Arne Jansena2de7332011-03-08 14:14:00 +01003362 u64 generation;
Jan Schmidte12fa9c2011-06-17 15:55:21 +02003363 int mirror_num;
Arne Jansen7a262852011-06-10 12:39:23 +02003364 struct reada_control *reada1;
3365 struct reada_control *reada2;
David Sterbae6c11f92016-03-24 18:00:53 +01003366 struct btrfs_key key;
Arne Jansen7a262852011-06-10 12:39:23 +02003367 struct btrfs_key key_end;
Arne Jansena2de7332011-03-08 14:14:00 +01003368 u64 increment = map->stripe_len;
3369 u64 offset;
Stefan Behrensff023aa2012-11-06 11:43:11 +01003370 u64 extent_logical;
3371 u64 extent_physical;
3372 u64 extent_len;
Miao Xie5a6ac9e2014-11-06 17:20:58 +08003373 u64 stripe_logical;
3374 u64 stripe_end;
Stefan Behrensff023aa2012-11-06 11:43:11 +01003375 struct btrfs_device *extent_dev;
3376 int extent_mirror_num;
Wang Shilong3b080b22014-04-01 18:01:43 +08003377 int stop_loop = 0;
David Woodhouse53b381b2013-01-29 18:40:14 -05003378
Wang Shilong3b080b22014-04-01 18:01:43 +08003379 physical = map->stripes[num].physical;
Arne Jansena2de7332011-03-08 14:14:00 +01003380 offset = 0;
Liu Bo42c61ab2017-04-03 13:45:24 -07003381 nstripes = div64_u64(length, map->stripe_len);
Arne Jansena2de7332011-03-08 14:14:00 +01003382 if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
3383 offset = map->stripe_len * num;
3384 increment = map->stripe_len * map->num_stripes;
Jan Schmidt193ea742011-06-13 19:56:54 +02003385 mirror_num = 1;
Arne Jansena2de7332011-03-08 14:14:00 +01003386 } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
3387 int factor = map->num_stripes / map->sub_stripes;
3388 offset = map->stripe_len * (num / map->sub_stripes);
3389 increment = map->stripe_len * factor;
Jan Schmidt193ea742011-06-13 19:56:54 +02003390 mirror_num = num % map->sub_stripes + 1;
Arne Jansena2de7332011-03-08 14:14:00 +01003391 } else if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
3392 increment = map->stripe_len;
Jan Schmidt193ea742011-06-13 19:56:54 +02003393 mirror_num = num % map->num_stripes + 1;
Arne Jansena2de7332011-03-08 14:14:00 +01003394 } else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
3395 increment = map->stripe_len;
Jan Schmidt193ea742011-06-13 19:56:54 +02003396 mirror_num = num % map->num_stripes + 1;
Zhao Leiffe2d202015-01-20 15:11:44 +08003397 } else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
Miao Xie5a6ac9e2014-11-06 17:20:58 +08003398 get_raid56_logic_offset(physical, num, map, &offset, NULL);
Wang Shilong3b080b22014-04-01 18:01:43 +08003399 increment = map->stripe_len * nr_data_stripes(map);
3400 mirror_num = 1;
Arne Jansena2de7332011-03-08 14:14:00 +01003401 } else {
3402 increment = map->stripe_len;
Jan Schmidt193ea742011-06-13 19:56:54 +02003403 mirror_num = 1;
Arne Jansena2de7332011-03-08 14:14:00 +01003404 }
3405
3406 path = btrfs_alloc_path();
3407 if (!path)
3408 return -ENOMEM;
3409
Miao Xie5a6ac9e2014-11-06 17:20:58 +08003410 ppath = btrfs_alloc_path();
3411 if (!ppath) {
Tsutomu Itoh379d6852015-01-09 17:37:52 +09003412 btrfs_free_path(path);
Miao Xie5a6ac9e2014-11-06 17:20:58 +08003413 return -ENOMEM;
3414 }
3415
Stefan Behrensb5d67f62012-03-27 14:21:27 -04003416 /*
3417 * work on commit root. The related disk blocks are static as
3418	 * long as COW is applied. This means it is safe to rewrite
3419 * them to repair disk errors without any race conditions
3420 */
Arne Jansena2de7332011-03-08 14:14:00 +01003421 path->search_commit_root = 1;
3422 path->skip_locking = 1;
3423
Gui Hecheng063c54d2015-01-09 09:39:40 +08003424 ppath->search_commit_root = 1;
3425 ppath->skip_locking = 1;
Arne Jansena2de7332011-03-08 14:14:00 +01003426 /*
Arne Jansen7a262852011-06-10 12:39:23 +02003427	 * trigger the readahead for the extent tree and csum tree and wait
3428	 * for completion. During readahead, the scrub is officially paused
3429	 * so as not to hold off transaction commits
Arne Jansena2de7332011-03-08 14:14:00 +01003430 */
3431 logical = base + offset;
Wang Shilong3b080b22014-04-01 18:01:43 +08003432 physical_end = physical + nstripes * map->stripe_len;
Zhao Leiffe2d202015-01-20 15:11:44 +08003433 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
Wang Shilong3b080b22014-04-01 18:01:43 +08003434 get_raid56_logic_offset(physical_end, num,
Miao Xie5a6ac9e2014-11-06 17:20:58 +08003435 map, &logic_end, NULL);
Wang Shilong3b080b22014-04-01 18:01:43 +08003436 logic_end += base;
3437 } else {
3438 logic_end = logical + increment * nstripes;
3439 }
Stefan Behrensd9d181c2012-11-02 09:58:09 +01003440 wait_event(sctx->list_wait,
Stefan Behrensb6bfebc2012-11-02 16:44:58 +01003441 atomic_read(&sctx->bios_in_flight) == 0);
Wang Shilongcb7ab022013-12-04 21:16:53 +08003442 scrub_blocked_if_needed(fs_info);
Arne Jansena2de7332011-03-08 14:14:00 +01003443
Arne Jansen7a262852011-06-10 12:39:23 +02003444 /* FIXME it might be better to start readahead at commit root */
David Sterbae6c11f92016-03-24 18:00:53 +01003445 key.objectid = logical;
3446 key.type = BTRFS_EXTENT_ITEM_KEY;
3447 key.offset = (u64)0;
Wang Shilong3b080b22014-04-01 18:01:43 +08003448 key_end.objectid = logic_end;
Josef Bacik3173a182013-03-07 14:22:04 -05003449 key_end.type = BTRFS_METADATA_ITEM_KEY;
3450 key_end.offset = (u64)-1;
David Sterbae6c11f92016-03-24 18:00:53 +01003451 reada1 = btrfs_reada_add(root, &key, &key_end);
Arne Jansena2de7332011-03-08 14:14:00 +01003452
David Sterbae6c11f92016-03-24 18:00:53 +01003453 key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
3454 key.type = BTRFS_EXTENT_CSUM_KEY;
3455 key.offset = logical;
Arne Jansen7a262852011-06-10 12:39:23 +02003456 key_end.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
3457 key_end.type = BTRFS_EXTENT_CSUM_KEY;
Wang Shilong3b080b22014-04-01 18:01:43 +08003458 key_end.offset = logic_end;
David Sterbae6c11f92016-03-24 18:00:53 +01003459 reada2 = btrfs_reada_add(csum_root, &key, &key_end);
Arne Jansena2de7332011-03-08 14:14:00 +01003460
Arne Jansen7a262852011-06-10 12:39:23 +02003461 if (!IS_ERR(reada1))
3462 btrfs_reada_wait(reada1);
3463 if (!IS_ERR(reada2))
3464 btrfs_reada_wait(reada2);
Arne Jansena2de7332011-03-08 14:14:00 +01003465
Arne Jansena2de7332011-03-08 14:14:00 +01003466
3467 /*
3468 * collect all data csums for the stripe to avoid seeking during
3469	 * the scrub. This might currently (crc32) end up being about 1MB
3470 */
Arne Jansene7786c32011-05-28 20:58:38 +00003471 blk_start_plug(&plug);
Arne Jansena2de7332011-03-08 14:14:00 +01003472
Arne Jansena2de7332011-03-08 14:14:00 +01003473 /*
3474 * now find all extents for each stripe and scrub them
3475 */
Arne Jansena2de7332011-03-08 14:14:00 +01003476 ret = 0;
Wang Shilong3b080b22014-04-01 18:01:43 +08003477 while (physical < physical_end) {
Arne Jansena2de7332011-03-08 14:14:00 +01003478 /*
3479 * canceled?
3480 */
3481 if (atomic_read(&fs_info->scrub_cancel_req) ||
Stefan Behrensd9d181c2012-11-02 09:58:09 +01003482 atomic_read(&sctx->cancel_req)) {
Arne Jansena2de7332011-03-08 14:14:00 +01003483 ret = -ECANCELED;
3484 goto out;
3485 }
3486 /*
3487 * check to see if we have to pause
3488 */
3489 if (atomic_read(&fs_info->scrub_pause_req)) {
3490 /* push queued extents */
David Sterba2073c4c2017-03-31 17:12:51 +02003491 sctx->flush_all_writes = true;
Stefan Behrensd9d181c2012-11-02 09:58:09 +01003492 scrub_submit(sctx);
David Sterba3fb99302017-05-16 19:10:32 +02003493 mutex_lock(&sctx->wr_lock);
Stefan Behrensff023aa2012-11-06 11:43:11 +01003494 scrub_wr_submit(sctx);
David Sterba3fb99302017-05-16 19:10:32 +02003495 mutex_unlock(&sctx->wr_lock);
Stefan Behrensd9d181c2012-11-02 09:58:09 +01003496 wait_event(sctx->list_wait,
Stefan Behrensb6bfebc2012-11-02 16:44:58 +01003497 atomic_read(&sctx->bios_in_flight) == 0);
David Sterba2073c4c2017-03-31 17:12:51 +02003498 sctx->flush_all_writes = false;
Wang Shilong3cb09292013-12-04 21:15:19 +08003499 scrub_blocked_if_needed(fs_info);
Arne Jansena2de7332011-03-08 14:14:00 +01003500 }
3501
Zhao Leif2f66a22015-07-21 12:22:29 +08003502 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
3503 ret = get_raid56_logic_offset(physical, num, map,
3504 &logical,
3505 &stripe_logical);
3506 logical += base;
3507 if (ret) {
Zhao Lei79553232015-08-18 17:54:30 +08003508			/* it is a parity stripe */
Zhao Leif2f66a22015-07-21 12:22:29 +08003509 stripe_logical += base;
Zhao Leia0dd59d2015-07-21 15:42:26 +08003510 stripe_end = stripe_logical + increment;
Zhao Leif2f66a22015-07-21 12:22:29 +08003511 ret = scrub_raid56_parity(sctx, map, scrub_dev,
3512 ppath, stripe_logical,
3513 stripe_end);
3514 if (ret)
3515 goto out;
3516 goto skip;
3517 }
3518 }
3519
Wang Shilong7c76edb2014-01-12 21:38:32 +08003520 if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
3521 key.type = BTRFS_METADATA_ITEM_KEY;
3522 else
3523 key.type = BTRFS_EXTENT_ITEM_KEY;
Arne Jansena2de7332011-03-08 14:14:00 +01003524 key.objectid = logical;
Liu Bo625f1c8d2013-04-27 02:56:57 +00003525 key.offset = (u64)-1;
Arne Jansena2de7332011-03-08 14:14:00 +01003526
3527 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3528 if (ret < 0)
3529 goto out;
Josef Bacik3173a182013-03-07 14:22:04 -05003530
Arne Jansen8c510322011-06-03 10:09:26 +02003531 if (ret > 0) {
Wang Shilongade2e0b2014-01-12 21:38:33 +08003532 ret = btrfs_previous_extent_item(root, path, 0);
Arne Jansena2de7332011-03-08 14:14:00 +01003533 if (ret < 0)
3534 goto out;
Arne Jansen8c510322011-06-03 10:09:26 +02003535 if (ret > 0) {
3536 /* there's no smaller item, so stick with the
3537 * larger one */
3538 btrfs_release_path(path);
3539 ret = btrfs_search_slot(NULL, root, &key,
3540 path, 0, 0);
3541 if (ret < 0)
3542 goto out;
3543 }
Arne Jansena2de7332011-03-08 14:14:00 +01003544 }
3545
Liu Bo625f1c8d2013-04-27 02:56:57 +00003546 stop_loop = 0;
Arne Jansena2de7332011-03-08 14:14:00 +01003547 while (1) {
Josef Bacik3173a182013-03-07 14:22:04 -05003548 u64 bytes;
3549
Arne Jansena2de7332011-03-08 14:14:00 +01003550 l = path->nodes[0];
3551 slot = path->slots[0];
3552 if (slot >= btrfs_header_nritems(l)) {
3553 ret = btrfs_next_leaf(root, path);
3554 if (ret == 0)
3555 continue;
3556 if (ret < 0)
3557 goto out;
3558
Liu Bo625f1c8d2013-04-27 02:56:57 +00003559 stop_loop = 1;
Arne Jansena2de7332011-03-08 14:14:00 +01003560 break;
3561 }
3562 btrfs_item_key_to_cpu(l, &key, slot);
3563
Zhao Leid7cad232015-07-22 13:14:48 +08003564 if (key.type != BTRFS_EXTENT_ITEM_KEY &&
3565 key.type != BTRFS_METADATA_ITEM_KEY)
3566 goto next;
3567
Josef Bacik3173a182013-03-07 14:22:04 -05003568 if (key.type == BTRFS_METADATA_ITEM_KEY)
Jeff Mahoney0b246af2016-06-22 18:54:23 -04003569 bytes = fs_info->nodesize;
Josef Bacik3173a182013-03-07 14:22:04 -05003570 else
3571 bytes = key.offset;
3572
3573 if (key.objectid + bytes <= logical)
Arne Jansena2de7332011-03-08 14:14:00 +01003574 goto next;
3575
Liu Bo625f1c8d2013-04-27 02:56:57 +00003576 if (key.objectid >= logical + map->stripe_len) {
3577 /* out of this device extent */
3578 if (key.objectid >= logic_end)
3579 stop_loop = 1;
3580 break;
3581 }
Arne Jansena2de7332011-03-08 14:14:00 +01003582
3583 extent = btrfs_item_ptr(l, slot,
3584 struct btrfs_extent_item);
3585 flags = btrfs_extent_flags(l, extent);
3586 generation = btrfs_extent_generation(l, extent);
3587
Zhao Leia323e812015-07-23 12:29:49 +08003588 if ((flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) &&
3589 (key.objectid < logical ||
3590 key.objectid + bytes >
3591 logical + map->stripe_len)) {
Frank Holtonefe120a2013-12-20 11:37:06 -05003592 btrfs_err(fs_info,
Jeff Mahoney5d163e02016-09-20 10:05:00 -04003593 "scrub: tree block %llu spanning stripes, ignored. logical=%llu",
Geert Uytterhoevenc1c9ff72013-08-20 13:20:07 +02003594 key.objectid, logical);
Zhao Lei9799d2c32015-08-25 21:31:40 +08003595 spin_lock(&sctx->stat_lock);
3596 sctx->stat.uncorrectable_errors++;
3597 spin_unlock(&sctx->stat_lock);
Arne Jansena2de7332011-03-08 14:14:00 +01003598 goto next;
3599 }
3600
Liu Bo625f1c8d2013-04-27 02:56:57 +00003601again:
3602 extent_logical = key.objectid;
3603 extent_len = bytes;
3604
Arne Jansena2de7332011-03-08 14:14:00 +01003605 /*
3606 * trim extent to this stripe
3607 */
Liu Bo625f1c8d2013-04-27 02:56:57 +00003608 if (extent_logical < logical) {
3609 extent_len -= logical - extent_logical;
3610 extent_logical = logical;
Arne Jansena2de7332011-03-08 14:14:00 +01003611 }
Liu Bo625f1c8d2013-04-27 02:56:57 +00003612 if (extent_logical + extent_len >
Arne Jansena2de7332011-03-08 14:14:00 +01003613 logical + map->stripe_len) {
Liu Bo625f1c8d2013-04-27 02:56:57 +00003614 extent_len = logical + map->stripe_len -
3615 extent_logical;
Arne Jansena2de7332011-03-08 14:14:00 +01003616 }
3617
Liu Bo625f1c8d2013-04-27 02:56:57 +00003618 extent_physical = extent_logical - logical + physical;
Stefan Behrensff023aa2012-11-06 11:43:11 +01003619 extent_dev = scrub_dev;
3620 extent_mirror_num = mirror_num;
3621 if (is_dev_replace)
3622 scrub_remap_extent(fs_info, extent_logical,
3623 extent_len, &extent_physical,
3624 &extent_dev,
3625 &extent_mirror_num);
Liu Bo625f1c8d2013-04-27 02:56:57 +00003626
Zhao Leife8cf652015-07-22 13:14:47 +08003627 ret = btrfs_lookup_csums_range(csum_root,
3628 extent_logical,
3629 extent_logical +
3630 extent_len - 1,
3631 &sctx->csum_list, 1);
Arne Jansena2de7332011-03-08 14:14:00 +01003632 if (ret)
3633 goto out;
3634
Liu Bo6ca17652018-03-07 12:08:09 -07003635 ret = scrub_extent(sctx, map, extent_logical, extent_len,
Liu Bo625f1c8d2013-04-27 02:56:57 +00003636 extent_physical, extent_dev, flags,
3637 generation, extent_mirror_num,
Stefan Behrens115930c2013-07-04 16:14:23 +02003638 extent_logical - logical + physical);
Zhao Lei6fa96d72015-07-21 12:22:30 +08003639
3640 scrub_free_csums(sctx);
3641
Liu Bo625f1c8d2013-04-27 02:56:57 +00003642 if (ret)
3643 goto out;
3644
3645 if (extent_logical + extent_len <
3646 key.objectid + bytes) {
Zhao Leiffe2d202015-01-20 15:11:44 +08003647 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
Wang Shilong3b080b22014-04-01 18:01:43 +08003648 /*
3649				 * loop until we find the next data stripe
3650 * or we have finished all stripes.
3651 */
Miao Xie5a6ac9e2014-11-06 17:20:58 +08003652loop:
3653 physical += map->stripe_len;
3654 ret = get_raid56_logic_offset(physical,
3655 num, map, &logical,
3656 &stripe_logical);
3657 logical += base;
3658
3659 if (ret && physical < physical_end) {
3660 stripe_logical += base;
3661 stripe_end = stripe_logical +
Zhao Leia0dd59d2015-07-21 15:42:26 +08003662 increment;
Miao Xie5a6ac9e2014-11-06 17:20:58 +08003663 ret = scrub_raid56_parity(sctx,
3664 map, scrub_dev, ppath,
3665 stripe_logical,
3666 stripe_end);
3667 if (ret)
3668 goto out;
3669 goto loop;
3670 }
Wang Shilong3b080b22014-04-01 18:01:43 +08003671 } else {
3672 physical += map->stripe_len;
3673 logical += increment;
3674 }
Liu Bo625f1c8d2013-04-27 02:56:57 +00003675 if (logical < key.objectid + bytes) {
3676 cond_resched();
3677 goto again;
3678 }
3679
Wang Shilong3b080b22014-04-01 18:01:43 +08003680 if (physical >= physical_end) {
Liu Bo625f1c8d2013-04-27 02:56:57 +00003681 stop_loop = 1;
3682 break;
3683 }
3684 }
Arne Jansena2de7332011-03-08 14:14:00 +01003685next:
3686 path->slots[0]++;
3687 }
Chris Mason71267332011-05-23 06:30:52 -04003688 btrfs_release_path(path);
Wang Shilong3b080b22014-04-01 18:01:43 +08003689skip:
Arne Jansena2de7332011-03-08 14:14:00 +01003690 logical += increment;
3691 physical += map->stripe_len;
Stefan Behrensd9d181c2012-11-02 09:58:09 +01003692 spin_lock(&sctx->stat_lock);
Liu Bo625f1c8d2013-04-27 02:56:57 +00003693 if (stop_loop)
3694 sctx->stat.last_physical = map->stripes[num].physical +
3695 length;
3696 else
3697 sctx->stat.last_physical = physical;
Stefan Behrensd9d181c2012-11-02 09:58:09 +01003698 spin_unlock(&sctx->stat_lock);
Liu Bo625f1c8d2013-04-27 02:56:57 +00003699 if (stop_loop)
3700 break;
Arne Jansena2de7332011-03-08 14:14:00 +01003701 }
Stefan Behrensff023aa2012-11-06 11:43:11 +01003702out:
Arne Jansena2de7332011-03-08 14:14:00 +01003703 /* push queued extents */
Stefan Behrensd9d181c2012-11-02 09:58:09 +01003704 scrub_submit(sctx);
David Sterba3fb99302017-05-16 19:10:32 +02003705 mutex_lock(&sctx->wr_lock);
Stefan Behrensff023aa2012-11-06 11:43:11 +01003706 scrub_wr_submit(sctx);
David Sterba3fb99302017-05-16 19:10:32 +02003707 mutex_unlock(&sctx->wr_lock);
Arne Jansena2de7332011-03-08 14:14:00 +01003708
Arne Jansene7786c32011-05-28 20:58:38 +00003709 blk_finish_plug(&plug);
Arne Jansena2de7332011-03-08 14:14:00 +01003710 btrfs_free_path(path);
Miao Xie5a6ac9e2014-11-06 17:20:58 +08003711 btrfs_free_path(ppath);
Arne Jansena2de7332011-03-08 14:14:00 +01003712 return ret < 0 ? ret : 0;
3713}
3714
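/*
 * Map a chunk back to its map_lookup and scrub every stripe of it that
 * lives on @scrub_dev at @dev_offset.
 */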
Stefan Behrensd9d181c2012-11-02 09:58:09 +01003715static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx,
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01003716 struct btrfs_device *scrub_dev,
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01003717 u64 chunk_offset, u64 length,
Filipe Manana020d5b72015-11-19 10:57:20 +00003718 u64 dev_offset,
3719 struct btrfs_block_group_cache *cache,
3720 int is_dev_replace)
Arne Jansena2de7332011-03-08 14:14:00 +01003721{
Jeff Mahoneyfb456252016-06-22 18:54:56 -04003722 struct btrfs_fs_info *fs_info = sctx->fs_info;
3723 struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
Arne Jansena2de7332011-03-08 14:14:00 +01003724 struct map_lookup *map;
3725 struct extent_map *em;
3726 int i;
Stefan Behrensff023aa2012-11-06 11:43:11 +01003727 int ret = 0;
Arne Jansena2de7332011-03-08 14:14:00 +01003728
3729 read_lock(&map_tree->map_tree.lock);
3730 em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
3731 read_unlock(&map_tree->map_tree.lock);
3732
Filipe Manana020d5b72015-11-19 10:57:20 +00003733 if (!em) {
3734 /*
3735 * Might have been an unused block group deleted by the cleaner
3736 * kthread or relocation.
3737 */
3738 spin_lock(&cache->lock);
3739 if (!cache->removed)
3740 ret = -EINVAL;
3741 spin_unlock(&cache->lock);
3742
3743 return ret;
3744 }
Arne Jansena2de7332011-03-08 14:14:00 +01003745
Jeff Mahoney95617d62015-06-03 10:55:48 -04003746 map = em->map_lookup;
Arne Jansena2de7332011-03-08 14:14:00 +01003747 if (em->start != chunk_offset)
3748 goto out;
3749
3750 if (em->len < length)
3751 goto out;
3752
3753 for (i = 0; i < map->num_stripes; ++i) {
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01003754 if (map->stripes[i].dev->bdev == scrub_dev->bdev &&
Arne Jansen859acaf2012-02-09 15:09:02 +01003755 map->stripes[i].physical == dev_offset) {
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01003756 ret = scrub_stripe(sctx, map, scrub_dev, i,
Stefan Behrensff023aa2012-11-06 11:43:11 +01003757 chunk_offset, length,
3758 is_dev_replace);
Arne Jansena2de7332011-03-08 14:14:00 +01003759 if (ret)
3760 goto out;
3761 }
3762 }
3763out:
3764 free_extent_map(em);
3765
3766 return ret;
3767}
3768
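/*
 * Walk the dev extent items of @scrub_dev within [start, end) and scrub
 * the corresponding block groups one by one, temporarily setting each
 * block group read-only while it is being scrubbed.
 */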
static noinline_for_stack
int scrub_enumerate_chunks(struct scrub_ctx *sctx,
			   struct btrfs_device *scrub_dev, u64 start, u64 end,
			   int is_dev_replace)
{
	struct btrfs_dev_extent *dev_extent = NULL;
	struct btrfs_path *path;
	struct btrfs_fs_info *fs_info = sctx->fs_info;
	struct btrfs_root *root = fs_info->dev_root;
	u64 length;
	u64 chunk_offset;
	int ret = 0;
	int ro_set;
	int slot;
	struct extent_buffer *l;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_block_group_cache *cache;
	struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->reada = READA_FORWARD;
	path->search_commit_root = 1;
	path->skip_locking = 1;

	key.objectid = scrub_dev->devid;
	key.offset = 0ull;
	key.type = BTRFS_DEV_EXTENT_KEY;

	while (1) {
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			break;
		if (ret > 0) {
			if (path->slots[0] >=
			    btrfs_header_nritems(path->nodes[0])) {
				ret = btrfs_next_leaf(root, path);
				if (ret < 0)
					break;
				if (ret > 0) {
					ret = 0;
					break;
				}
			} else {
				ret = 0;
			}
		}

		l = path->nodes[0];
		slot = path->slots[0];

		btrfs_item_key_to_cpu(l, &found_key, slot);

		if (found_key.objectid != scrub_dev->devid)
			break;

		if (found_key.type != BTRFS_DEV_EXTENT_KEY)
			break;

		if (found_key.offset >= end)
			break;

		if (found_key.offset < key.offset)
			break;

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		length = btrfs_dev_extent_length(l, dev_extent);

		if (found_key.offset + length <= start)
			goto skip;

		chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);

		/*
		 * Get a reference on the corresponding block group to prevent
		 * the chunk from going away while we scrub it.
		 */
		cache = btrfs_lookup_block_group(fs_info, chunk_offset);

		/*
		 * Some chunks are removed but not yet committed to disk,
		 * continue scrubbing.
		 */
		if (!cache)
			goto skip;

		/*
		 * We need to call btrfs_inc_block_group_ro() with the scrub
		 * paused, to avoid a deadlock caused by:
		 * btrfs_inc_block_group_ro()
		 * -> btrfs_wait_for_commit()
		 * -> btrfs_commit_transaction()
		 * -> btrfs_scrub_pause()
		 */
		scrub_pause_on(fs_info);
		ret = btrfs_inc_block_group_ro(fs_info, cache);
		if (!ret && is_dev_replace) {
			/*
			 * If we are doing a device replace, wait for any tasks
			 * that started delalloc right before we set the block
			 * group to RO mode, as they might have just allocated
			 * an extent from it or decided they could do a nocow
			 * write. And if any such tasks did that, wait for their
			 * ordered extents to complete and then commit the
			 * current transaction, so that we can later see the new
			 * extent items in the extent tree - the ordered extents
			 * create delayed data references (for cow writes) when
			 * they complete, which will be run and insert the
			 * corresponding extent items into the extent tree when
			 * we commit the transaction they used when running
			 * inode.c:btrfs_finish_ordered_io(). We later use
			 * the commit root of the extent tree to find extents
			 * to copy from the srcdev into the tgtdev, and we don't
			 * want to miss any new extents.
			 */
			btrfs_wait_block_group_reservations(cache);
			btrfs_wait_nocow_writers(cache);
			ret = btrfs_wait_ordered_roots(fs_info, U64_MAX,
						       cache->key.objectid,
						       cache->key.offset);
			if (ret > 0) {
				struct btrfs_trans_handle *trans;

				trans = btrfs_join_transaction(root);
				if (IS_ERR(trans))
					ret = PTR_ERR(trans);
				else
					ret = btrfs_commit_transaction(trans);
				if (ret) {
					scrub_pause_off(fs_info);
					btrfs_put_block_group(cache);
					break;
				}
			}
		}
		scrub_pause_off(fs_info);

		if (ret == 0) {
			ro_set = 1;
		} else if (ret == -ENOSPC) {
			/*
			 * btrfs_inc_block_group_ro() returns -ENOSPC when it
			 * fails to create a new chunk for metadata. This is
			 * not a problem for scrub/replace, because metadata
			 * is always cowed, and our scrub paused transaction
			 * commits.
			 */
			ro_set = 0;
		} else {
			btrfs_warn(fs_info,
				   "failed setting block group ro: %d", ret);
			btrfs_put_block_group(cache);
			break;
		}

		btrfs_dev_replace_write_lock(&fs_info->dev_replace);
		dev_replace->cursor_right = found_key.offset + length;
		dev_replace->cursor_left = found_key.offset;
		dev_replace->item_needs_writeback = 1;
		btrfs_dev_replace_write_unlock(&fs_info->dev_replace);
		ret = scrub_chunk(sctx, scrub_dev, chunk_offset, length,
				  found_key.offset, cache, is_dev_replace);

		/*
		 * Flush and submit all pending read and write bios, then
		 * wait for them.
		 * Note that in the dev replace case, a read request causes
		 * write requests that are submitted in the read completion
		 * worker. Therefore in the current situation, it is required
		 * that all write requests are flushed, so that all read and
		 * write requests are really completed when bios_in_flight
		 * changes to 0.
		 */
		sctx->flush_all_writes = true;
		scrub_submit(sctx);
		mutex_lock(&sctx->wr_lock);
		scrub_wr_submit(sctx);
		mutex_unlock(&sctx->wr_lock);

		wait_event(sctx->list_wait,
			   atomic_read(&sctx->bios_in_flight) == 0);

		scrub_pause_on(fs_info);

		/*
		 * This must be done before we decrease @scrub_paused, to make
		 * sure we don't block transaction commit while we are waiting
		 * for pending workers to finish.
		 */
		wait_event(sctx->list_wait,
			   atomic_read(&sctx->workers_pending) == 0);
		sctx->flush_all_writes = false;

		scrub_pause_off(fs_info);

		btrfs_dev_replace_write_lock(&fs_info->dev_replace);
		dev_replace->cursor_left = dev_replace->cursor_right;
		dev_replace->item_needs_writeback = 1;
		btrfs_dev_replace_write_unlock(&fs_info->dev_replace);

		if (ro_set)
			btrfs_dec_block_group_ro(cache);

		/*
		 * We might have prevented the cleaner kthread from deleting
		 * this block group if it was already unused because we raced
		 * and set it to RO mode first. So add it back to the unused
		 * list, otherwise it might not ever be deleted unless a manual
		 * balance is triggered or it becomes used and unused again.
		 */
		spin_lock(&cache->lock);
		if (!cache->removed && !cache->ro && cache->reserved == 0 &&
		    btrfs_block_group_used(&cache->item) == 0) {
			spin_unlock(&cache->lock);
			spin_lock(&fs_info->unused_bgs_lock);
			if (list_empty(&cache->bg_list)) {
				btrfs_get_block_group(cache);
				list_add_tail(&cache->bg_list,
					      &fs_info->unused_bgs);
			}
			spin_unlock(&fs_info->unused_bgs_lock);
		} else {
			spin_unlock(&cache->lock);
		}

		btrfs_put_block_group(cache);
		if (ret)
			break;
		if (is_dev_replace &&
		    atomic64_read(&dev_replace->num_write_errors) > 0) {
			ret = -EIO;
			break;
		}
		if (sctx->stat.malloc_errors > 0) {
			ret = -ENOMEM;
			break;
		}
skip:
		key.offset = found_key.offset + length;
		btrfs_release_path(path);
	}

	btrfs_free_path(path);

	return ret;
}

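/*
 * Scrub all super block copies of @scrub_dev that fit inside the
 * device's committed size, passing the expected generation down for
 * verification.
 */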
static noinline_for_stack int scrub_supers(struct scrub_ctx *sctx,
					   struct btrfs_device *scrub_dev)
{
	int i;
	u64 bytenr;
	u64 gen;
	int ret;
	struct btrfs_fs_info *fs_info = sctx->fs_info;

	if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
		return -EIO;

	/* Seed devices of a new filesystem have their own generation. */
	if (scrub_dev->fs_devices != fs_info->fs_devices)
		gen = scrub_dev->generation;
	else
		gen = fs_info->last_trans_committed;

	for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
		bytenr = btrfs_sb_offset(i);
		if (bytenr + BTRFS_SUPER_INFO_SIZE >
		    scrub_dev->commit_total_bytes)
			break;

		ret = scrub_pages(sctx, bytenr, BTRFS_SUPER_INFO_SIZE, bytenr,
				  scrub_dev, BTRFS_EXTENT_FLAG_SUPER, gen, i,
				  NULL, 1, bytenr);
		if (ret)
			return ret;
	}
	wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);

	return 0;
}

/*
 * Get a reference count on fs_info->scrub_workers. Start the workers if
 * necessary.
 */
static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info,
						int is_dev_replace)
{
	unsigned int flags = WQ_FREEZABLE | WQ_UNBOUND;
	int max_active = fs_info->thread_pool_size;

	if (fs_info->scrub_workers_refcnt == 0) {
		fs_info->scrub_workers = btrfs_alloc_workqueue(fs_info, "scrub",
				flags, is_dev_replace ? 1 : max_active, 4);
		if (!fs_info->scrub_workers)
			goto fail_scrub_workers;

		fs_info->scrub_wr_completion_workers =
			btrfs_alloc_workqueue(fs_info, "scrubwrc", flags,
					      max_active, 2);
		if (!fs_info->scrub_wr_completion_workers)
			goto fail_scrub_wr_completion_workers;

		fs_info->scrub_nocow_workers =
			btrfs_alloc_workqueue(fs_info, "scrubnc", flags, 1, 0);
		if (!fs_info->scrub_nocow_workers)
			goto fail_scrub_nocow_workers;
		fs_info->scrub_parity_workers =
			btrfs_alloc_workqueue(fs_info, "scrubparity", flags,
					      max_active, 2);
		if (!fs_info->scrub_parity_workers)
			goto fail_scrub_parity_workers;
	}
	++fs_info->scrub_workers_refcnt;
	return 0;

fail_scrub_parity_workers:
	btrfs_destroy_workqueue(fs_info->scrub_nocow_workers);
fail_scrub_nocow_workers:
	btrfs_destroy_workqueue(fs_info->scrub_wr_completion_workers);
fail_scrub_wr_completion_workers:
	btrfs_destroy_workqueue(fs_info->scrub_workers);
fail_scrub_workers:
	return -ENOMEM;
}

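/*
 * Drop a reference on fs_info->scrub_workers and destroy the scrub
 * workqueues once the last reference is gone.
 */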
static noinline_for_stack void scrub_workers_put(struct btrfs_fs_info *fs_info)
{
	if (--fs_info->scrub_workers_refcnt == 0) {
		btrfs_destroy_workqueue(fs_info->scrub_workers);
		btrfs_destroy_workqueue(fs_info->scrub_wr_completion_workers);
		btrfs_destroy_workqueue(fs_info->scrub_nocow_workers);
		btrfs_destroy_workqueue(fs_info->scrub_parity_workers);
	}
	WARN_ON(fs_info->scrub_workers_refcnt < 0);
}

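/*
 * Entry point for both a plain scrub and a device replace, reached from
 * the BTRFS_IOC_SCRUB ioctl handler and from the dev-replace code.
 *
 * A minimal caller sketch (hypothetical, for illustration only; the
 * devid and range below are made up, readonly = 0, is_dev_replace = 0):
 *
 *	struct btrfs_scrub_progress progress = {};
 *	int ret;
 *
 *	ret = btrfs_scrub_dev(fs_info, devid, 0, (u64)-1, &progress, 0, 0);
 *	if (!ret)
 *		pr_info("scrub done, csum errors: %llu\n",
 *			progress.csum_errors);
 */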
int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
		    u64 end, struct btrfs_scrub_progress *progress,
		    int readonly, int is_dev_replace)
{
	struct scrub_ctx *sctx;
	int ret;
	struct btrfs_device *dev;
	struct rcu_string *name;

	if (btrfs_fs_closing(fs_info))
		return -EINVAL;

	if (fs_info->nodesize > BTRFS_STRIPE_LEN) {
		/*
		 * In this case scrub is unable to calculate the checksum
		 * the way scrub is implemented. Do not handle this
		 * situation at all, because it won't ever happen.
		 */
		btrfs_err(fs_info,
			  "scrub: size assumption nodesize <= BTRFS_STRIPE_LEN (%d <= %d) fails",
			  fs_info->nodesize, BTRFS_STRIPE_LEN);
		return -EINVAL;
	}

	if (fs_info->sectorsize != PAGE_SIZE) {
		/* not supported for data w/o checksums */
		btrfs_err_rl(fs_info,
			     "scrub: size assumption sectorsize != PAGE_SIZE (%d != %lu) fails",
			     fs_info->sectorsize, PAGE_SIZE);
		return -EINVAL;
	}

	if (fs_info->nodesize >
	    PAGE_SIZE * SCRUB_MAX_PAGES_PER_BLOCK ||
	    fs_info->sectorsize > PAGE_SIZE * SCRUB_MAX_PAGES_PER_BLOCK) {
		/*
		 * Would exhaust the array bounds of the pagev member in
		 * struct scrub_block.
		 */
		btrfs_err(fs_info,
			  "scrub: size assumption nodesize and sectorsize <= SCRUB_MAX_PAGES_PER_BLOCK (%d <= %d && %d <= %d) fails",
			  fs_info->nodesize, SCRUB_MAX_PAGES_PER_BLOCK,
			  fs_info->sectorsize, SCRUB_MAX_PAGES_PER_BLOCK);
		return -EINVAL;
	}

	mutex_lock(&fs_info->fs_devices->device_list_mutex);
	dev = btrfs_find_device(fs_info, devid, NULL, NULL);
	if (!dev || (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state) &&
		     !is_dev_replace)) {
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		return -ENODEV;
	}

	if (!is_dev_replace && !readonly &&
	    !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state)) {
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		rcu_read_lock();
		name = rcu_dereference(dev->name);
		btrfs_err(fs_info, "scrub: device %s is not writable",
			  name->str);
		rcu_read_unlock();
		return -EROFS;
	}

	mutex_lock(&fs_info->scrub_lock);
	if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) ||
	    test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &dev->dev_state)) {
		mutex_unlock(&fs_info->scrub_lock);
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		return -EIO;
	}

	btrfs_dev_replace_read_lock(&fs_info->dev_replace);
	if (dev->scrub_ctx ||
	    (!is_dev_replace &&
	     btrfs_dev_replace_is_ongoing(&fs_info->dev_replace))) {
		btrfs_dev_replace_read_unlock(&fs_info->dev_replace);
		mutex_unlock(&fs_info->scrub_lock);
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		return -EINPROGRESS;
	}
	btrfs_dev_replace_read_unlock(&fs_info->dev_replace);

	ret = scrub_workers_get(fs_info, is_dev_replace);
	if (ret) {
		mutex_unlock(&fs_info->scrub_lock);
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		return ret;
	}

	sctx = scrub_setup_ctx(dev, is_dev_replace);
	if (IS_ERR(sctx)) {
		mutex_unlock(&fs_info->scrub_lock);
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		scrub_workers_put(fs_info);
		return PTR_ERR(sctx);
	}
	sctx->readonly = readonly;
	dev->scrub_ctx = sctx;
	mutex_unlock(&fs_info->fs_devices->device_list_mutex);

	/*
	 * By checking @scrub_pause_req here, we can avoid a race between
	 * committing the transaction and scrubbing.
	 */
	__scrub_blocked_if_needed(fs_info);
	atomic_inc(&fs_info->scrubs_running);
	mutex_unlock(&fs_info->scrub_lock);

	if (!is_dev_replace) {
		/*
		 * Hold the device list mutex so that scrubbing the super
		 * blocks does not race with the super block writes kicked
		 * off by a log tree sync.
		 */
		mutex_lock(&fs_info->fs_devices->device_list_mutex);
		ret = scrub_supers(sctx, dev);
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
	}

	if (!ret)
		ret = scrub_enumerate_chunks(sctx, dev, start, end,
					     is_dev_replace);

	wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);
	atomic_dec(&fs_info->scrubs_running);
	wake_up(&fs_info->scrub_pause_wait);

	wait_event(sctx->list_wait, atomic_read(&sctx->workers_pending) == 0);

	if (progress)
		memcpy(progress, &sctx->stat, sizeof(*progress));

	mutex_lock(&fs_info->scrub_lock);
	dev->scrub_ctx = NULL;
	scrub_workers_put(fs_info);
	mutex_unlock(&fs_info->scrub_lock);

	scrub_put_ctx(sctx);

	return ret;
}

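/*
 * Block until every scrub running on this filesystem has reached its
 * pause point. Paired with btrfs_scrub_continue() below; the
 * transaction commit path uses this pair to keep scrub quiescent while
 * the commit is in progress.
 */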
void btrfs_scrub_pause(struct btrfs_fs_info *fs_info)
{
	mutex_lock(&fs_info->scrub_lock);
	atomic_inc(&fs_info->scrub_pause_req);
	while (atomic_read(&fs_info->scrubs_paused) !=
	       atomic_read(&fs_info->scrubs_running)) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
			   atomic_read(&fs_info->scrubs_paused) ==
			   atomic_read(&fs_info->scrubs_running));
		mutex_lock(&fs_info->scrub_lock);
	}
	mutex_unlock(&fs_info->scrub_lock);
}

void btrfs_scrub_continue(struct btrfs_fs_info *fs_info)
{
	atomic_dec(&fs_info->scrub_pause_req);
	wake_up(&fs_info->scrub_pause_wait);
}

int btrfs_scrub_cancel(struct btrfs_fs_info *fs_info)
{
	mutex_lock(&fs_info->scrub_lock);
	if (!atomic_read(&fs_info->scrubs_running)) {
		mutex_unlock(&fs_info->scrub_lock);
		return -ENOTCONN;
	}

	atomic_inc(&fs_info->scrub_cancel_req);
	while (atomic_read(&fs_info->scrubs_running)) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
			   atomic_read(&fs_info->scrubs_running) == 0);
		mutex_lock(&fs_info->scrub_lock);
	}
	atomic_dec(&fs_info->scrub_cancel_req);
	mutex_unlock(&fs_info->scrub_lock);

	return 0;
}

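/*
 * Cancel the scrub running on @dev, if any, and wait until its scrub
 * context has been torn down.
 */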
int btrfs_scrub_cancel_dev(struct btrfs_fs_info *fs_info,
			   struct btrfs_device *dev)
{
	struct scrub_ctx *sctx;

	mutex_lock(&fs_info->scrub_lock);
	sctx = dev->scrub_ctx;
	if (!sctx) {
		mutex_unlock(&fs_info->scrub_lock);
		return -ENOTCONN;
	}
	atomic_inc(&sctx->cancel_req);
	while (dev->scrub_ctx) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
			   dev->scrub_ctx == NULL);
		mutex_lock(&fs_info->scrub_lock);
	}
	mutex_unlock(&fs_info->scrub_lock);

	return 0;
}

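/*
 * Copy the live scrub statistics of the device with the given devid
 * into @progress.
 */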
int btrfs_scrub_progress(struct btrfs_fs_info *fs_info, u64 devid,
			 struct btrfs_scrub_progress *progress)
{
	struct btrfs_device *dev;
	struct scrub_ctx *sctx = NULL;

	mutex_lock(&fs_info->fs_devices->device_list_mutex);
	dev = btrfs_find_device(fs_info, devid, NULL, NULL);
	if (dev)
		sctx = dev->scrub_ctx;
	if (sctx)
		memcpy(progress, &sctx->stat, sizeof(*progress));
	mutex_unlock(&fs_info->fs_devices->device_list_mutex);

	return dev ? (sctx ? 0 : -ENOTCONN) : -ENODEV;
}

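/*
 * Map @extent_logical to the physical offset, device and mirror number
 * of the first stripe of that extent, so that the dev-replace code can
 * read from a concrete copy. On any mapping failure the output
 * parameters are left untouched.
 */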
static void scrub_remap_extent(struct btrfs_fs_info *fs_info,
			       u64 extent_logical, u64 extent_len,
			       u64 *extent_physical,
			       struct btrfs_device **extent_dev,
			       int *extent_mirror_num)
{
	u64 mapped_length;
	struct btrfs_bio *bbio = NULL;
	int ret;

	mapped_length = extent_len;
	ret = btrfs_map_block(fs_info, BTRFS_MAP_READ, extent_logical,
			      &mapped_length, &bbio, 0);
	if (ret || !bbio || mapped_length < extent_len ||
	    !bbio->stripes[0].dev->bdev) {
		btrfs_put_bbio(bbio);
		return;
	}

	*extent_physical = bbio->stripes[0].physical;
	*extent_mirror_num = bbio->mirror_num;
	*extent_dev = bbio->stripes[0].dev;
	btrfs_put_bbio(bbio);
}

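/*
 * Queue a worker to copy a nocow extent to the replace target through
 * the page cache. Nocow extents can be rewritten in place while the
 * replace runs, so they are not copied via the regular scrub
 * read/write path.
 */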
static int copy_nocow_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
			    int mirror_num, u64 physical_for_dev_replace)
{
	struct scrub_copy_nocow_ctx *nocow_ctx;
	struct btrfs_fs_info *fs_info = sctx->fs_info;

	nocow_ctx = kzalloc(sizeof(*nocow_ctx), GFP_NOFS);
	if (!nocow_ctx) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.malloc_errors++;
		spin_unlock(&sctx->stat_lock);
		return -ENOMEM;
	}

	scrub_pending_trans_workers_inc(sctx);

	nocow_ctx->sctx = sctx;
	nocow_ctx->logical = logical;
	nocow_ctx->len = len;
	nocow_ctx->mirror_num = mirror_num;
	nocow_ctx->physical_for_dev_replace = physical_for_dev_replace;
	btrfs_init_work(&nocow_ctx->work, btrfs_scrubnc_helper,
			copy_nocow_pages_worker, NULL, NULL);
	INIT_LIST_HEAD(&nocow_ctx->inodes);
	btrfs_queue_work(fs_info->scrub_nocow_workers,
			 &nocow_ctx->work);

	return 0;
}

static int record_inode_for_nocow(u64 inum, u64 offset, u64 root, void *ctx)
{
	struct scrub_copy_nocow_ctx *nocow_ctx = ctx;
	struct scrub_nocow_inode *nocow_inode;

	nocow_inode = kzalloc(sizeof(*nocow_inode), GFP_NOFS);
	if (!nocow_inode)
		return -ENOMEM;
	nocow_inode->inum = inum;
	nocow_inode->offset = offset;
	nocow_inode->root = root;
	list_add_tail(&nocow_inode->list, &nocow_ctx->inodes);
	return 0;
}

#define COPY_COMPLETE 1

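/*
 * Worker for copy_nocow_pages(): collect all inodes that reference the
 * extent via backref iteration, then copy the pages of each inode in
 * turn. A return value of COPY_COMPLETE from
 * copy_nocow_pages_for_inode() means the whole range was copied and the
 * remaining inodes can be skipped.
 */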
static void copy_nocow_pages_worker(struct btrfs_work *work)
{
	struct scrub_copy_nocow_ctx *nocow_ctx =
		container_of(work, struct scrub_copy_nocow_ctx, work);
	struct scrub_ctx *sctx = nocow_ctx->sctx;
	struct btrfs_fs_info *fs_info = sctx->fs_info;
	struct btrfs_root *root = fs_info->extent_root;
	u64 logical = nocow_ctx->logical;
	u64 len = nocow_ctx->len;
	int mirror_num = nocow_ctx->mirror_num;
	u64 physical_for_dev_replace = nocow_ctx->physical_for_dev_replace;
	int ret;
	struct btrfs_trans_handle *trans = NULL;
	struct btrfs_path *path;
	int not_written = 0;

	path = btrfs_alloc_path();
	if (!path) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.malloc_errors++;
		spin_unlock(&sctx->stat_lock);
		not_written = 1;
		goto out;
	}

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans)) {
		not_written = 1;
		goto out;
	}

	ret = iterate_inodes_from_logical(logical, fs_info, path,
			record_inode_for_nocow, nocow_ctx, false);
	if (ret != 0 && ret != -ENOENT) {
		btrfs_warn(fs_info,
			   "iterate_inodes_from_logical() failed: log %llu, phys %llu, len %llu, mir %u, ret %d",
			   logical, physical_for_dev_replace, len, mirror_num,
			   ret);
		not_written = 1;
		goto out;
	}

	btrfs_end_transaction(trans);
	trans = NULL;
	while (!list_empty(&nocow_ctx->inodes)) {
		struct scrub_nocow_inode *entry;
		entry = list_first_entry(&nocow_ctx->inodes,
					 struct scrub_nocow_inode,
					 list);
		list_del_init(&entry->list);
		ret = copy_nocow_pages_for_inode(entry->inum, entry->offset,
						 entry->root, nocow_ctx);
		kfree(entry);
		if (ret == COPY_COMPLETE) {
			ret = 0;
			break;
		} else if (ret) {
			break;
		}
	}
out:
	while (!list_empty(&nocow_ctx->inodes)) {
		struct scrub_nocow_inode *entry;
		entry = list_first_entry(&nocow_ctx->inodes,
					 struct scrub_nocow_inode,
					 list);
		list_del_init(&entry->list);
		kfree(entry);
	}
	if (trans && !IS_ERR(trans))
		btrfs_end_transaction(trans);
	if (not_written)
		btrfs_dev_replace_stats_inc(&fs_info->dev_replace.
					    num_uncorrectable_read_errors);

	btrfs_free_path(path);
	kfree(nocow_ctx);

	scrub_pending_trans_workers_dec(sctx);
}

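/*
 * Check that the file extent at @start still maps to @logical and is
 * stable. Returns 1 if the range should be skipped (a pending ordered
 * extent, the extent moved, or it is preallocated), 0 if it is safe to
 * copy, and a negative errno on failure.
 */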
static int check_extent_to_block(struct btrfs_inode *inode, u64 start, u64 len,
				 u64 logical)
{
	struct extent_state *cached_state = NULL;
	struct btrfs_ordered_extent *ordered;
	struct extent_io_tree *io_tree;
	struct extent_map *em;
	u64 lockstart = start, lockend = start + len - 1;
	int ret = 0;

	io_tree = &inode->io_tree;

	lock_extent_bits(io_tree, lockstart, lockend, &cached_state);
	ordered = btrfs_lookup_ordered_range(inode, lockstart, len);
	if (ordered) {
		btrfs_put_ordered_extent(ordered);
		ret = 1;
		goto out_unlock;
	}

	em = btrfs_get_extent(inode, NULL, 0, start, len, 0);
	if (IS_ERR(em)) {
		ret = PTR_ERR(em);
		goto out_unlock;
	}

	/*
	 * This extent does not actually cover the logical extent anymore,
	 * move on to the next inode.
	 */
	if (em->block_start > logical ||
	    em->block_start + em->block_len < logical + len ||
	    test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
		free_extent_map(em);
		ret = 1;
		goto out_unlock;
	}
	free_extent_map(em);

out_unlock:
	unlock_extent_cached(io_tree, lockstart, lockend, &cached_state);
	return ret;
}

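/*
 * Copy one inode's pages backing the nocow extent to the replace
 * target, page by page, rechecking before and during the copy that the
 * extent still covers the logical range.
 */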
static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root,
				      struct scrub_copy_nocow_ctx *nocow_ctx)
{
	struct btrfs_fs_info *fs_info = nocow_ctx->sctx->fs_info;
	struct btrfs_key key;
	struct inode *inode;
	struct page *page;
	struct btrfs_root *local_root;
	struct extent_io_tree *io_tree;
	u64 physical_for_dev_replace;
	u64 nocow_ctx_logical;
	u64 len = nocow_ctx->len;
	unsigned long index;
	int srcu_index;
	int ret = 0;
	int err = 0;

	key.objectid = root;
	key.type = BTRFS_ROOT_ITEM_KEY;
	key.offset = (u64)-1;

	srcu_index = srcu_read_lock(&fs_info->subvol_srcu);

	local_root = btrfs_read_fs_root_no_name(fs_info, &key);
	if (IS_ERR(local_root)) {
		srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
		return PTR_ERR(local_root);
	}

	key.type = BTRFS_INODE_ITEM_KEY;
	key.objectid = inum;
	key.offset = 0;
	inode = btrfs_iget(fs_info->sb, &key, local_root, NULL);
	srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	/* Avoid truncate/dio/punch hole. */
	inode_lock(inode);
	inode_dio_wait(inode);

	physical_for_dev_replace = nocow_ctx->physical_for_dev_replace;
	io_tree = &BTRFS_I(inode)->io_tree;
	nocow_ctx_logical = nocow_ctx->logical;

	ret = check_extent_to_block(BTRFS_I(inode), offset, len,
				    nocow_ctx_logical);
	if (ret) {
		ret = ret > 0 ? 0 : ret;
		goto out;
	}

	while (len >= PAGE_SIZE) {
		index = offset >> PAGE_SHIFT;
again:
		page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
		if (!page) {
			btrfs_err(fs_info, "find_or_create_page() failed");
			ret = -ENOMEM;
			goto out;
		}

		if (PageUptodate(page)) {
			if (PageDirty(page))
				goto next_page;
		} else {
			ClearPageError(page);
			err = extent_read_full_page(io_tree, page,
						    btrfs_get_extent,
						    nocow_ctx->mirror_num);
			if (err) {
				ret = err;
				goto next_page;
			}

			lock_page(page);
			/*
			 * If the page has been removed from the page cache,
			 * the data in it is meaningless: it may be stale,
			 * while the new data may have been written into a
			 * new page in the page cache.
			 */
			if (page->mapping != inode->i_mapping) {
				unlock_page(page);
				put_page(page);
				goto again;
			}
			if (!PageUptodate(page)) {
				ret = -EIO;
				goto next_page;
			}
		}

		ret = check_extent_to_block(BTRFS_I(inode), offset, len,
					    nocow_ctx_logical);
		if (ret) {
			ret = ret > 0 ? 0 : ret;
			goto next_page;
		}

		err = write_page_nocow(nocow_ctx->sctx,
				       physical_for_dev_replace, page);
		if (err)
			ret = err;
next_page:
		unlock_page(page);
		put_page(page);

		if (ret)
			break;

		offset += PAGE_SIZE;
		physical_for_dev_replace += PAGE_SIZE;
		nocow_ctx_logical += PAGE_SIZE;
		len -= PAGE_SIZE;
	}
	ret = COPY_COMPLETE;
out:
	inode_unlock(inode);
	iput(inode);
	return ret;
}

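/*
 * Synchronously write a single page to the write target device at the
 * given physical offset.
 */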
static int write_page_nocow(struct scrub_ctx *sctx,
			    u64 physical_for_dev_replace, struct page *page)
{
	struct bio *bio;
	struct btrfs_device *dev;

	dev = sctx->wr_tgtdev;
	if (!dev)
		return -EIO;
	if (!dev->bdev) {
		btrfs_warn_rl(dev->fs_info,
			"scrub write_page_nocow(bdev == NULL) is unexpected");
		return -EIO;
	}
	bio = btrfs_io_bio_alloc(1);
	bio->bi_iter.bi_size = 0;
	bio->bi_iter.bi_sector = physical_for_dev_replace >> 9;
	bio_set_dev(bio, dev->bdev);
	bio->bi_opf = REQ_OP_WRITE | REQ_SYNC;
	/* bio_add_page won't fail on a freshly allocated bio */
	bio_add_page(bio, page, PAGE_SIZE, 0);

	if (btrfsic_submit_bio_wait(bio)) {
		bio_put(bio);
		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS);
		return -EIO;
	}

	bio_put(bio);
	return 0;
}