// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2011, 2012 STRATO.  All rights reserved.
 */

#include <linux/blkdev.h>
#include <linux/ratelimit.h>
#include <linux/sched/mm.h>
#include "ctree.h"
#include "volumes.h"
#include "disk-io.h"
#include "ordered-data.h"
#include "transaction.h"
#include "backref.h"
#include "extent_io.h"
#include "dev-replace.h"
#include "check-integrity.h"
#include "rcu-string.h"
#include "raid56.h"

/*
 * This is only the first step towards a full-featured scrub. It reads all
 * extents and super blocks and verifies the checksums. In case a bad checksum
 * is found or the extent cannot be read, good data will be written back if
 * any can be found.
 *
 * Future enhancements:
 *  - In case an unrepairable extent is encountered, track which files are
 *    affected and report them
 *  - track and record media errors, throw out bad devices
 *  - add a mode to also read unallocated space
 */

struct scrub_block;
struct scrub_ctx;

/*
 * The following three values only influence performance.
 * The last one configures the number of parallel and outstanding I/O
 * operations. The first two values configure an upper limit for the number
 * of (dynamically allocated) pages that are added to a bio.
 */
#define SCRUB_PAGES_PER_RD_BIO	32	/* 128k per bio */
#define SCRUB_PAGES_PER_WR_BIO	32	/* 128k per bio */
#define SCRUB_BIOS_PER_SCTX	64	/* 8MB per device in flight */
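/*
 * Sizing note (added for clarity, assuming 4 KiB pages): 32 pages per bio
 * gives 32 * 4 KiB = 128 KiB of payload per bio, and 64 bios per scrub
 * context caps the outstanding I/O at 64 * 128 KiB = 8 MiB per device,
 * matching the comments on the defines above.
 */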

/*
 * The following value times PAGE_SIZE needs to be large enough to match the
 * largest node/leaf/sector size that shall be supported.
 * Values larger than BTRFS_STRIPE_LEN are not supported.
 */
#define SCRUB_MAX_PAGES_PER_BLOCK	16	/* 64k per node/leaf/sector */

struct scrub_recover {
	refcount_t		refs;
	struct btrfs_bio	*bbio;
	u64			map_length;
};

struct scrub_page {
	struct scrub_block	*sblock;
	struct page		*page;
	struct btrfs_device	*dev;
	struct list_head	list;
	u64			flags;  /* extent flags */
	u64			generation;
	u64			logical;
	u64			physical;
	u64			physical_for_dev_replace;
	atomic_t		refs;
	struct {
		unsigned int	mirror_num:8;
		unsigned int	have_csum:1;
		unsigned int	io_error:1;
	};
	u8			csum[BTRFS_CSUM_SIZE];

	struct scrub_recover	*recover;
};

struct scrub_bio {
	int			index;
	struct scrub_ctx	*sctx;
	struct btrfs_device	*dev;
	struct bio		*bio;
	blk_status_t		status;
	u64			logical;
	u64			physical;
#if SCRUB_PAGES_PER_WR_BIO >= SCRUB_PAGES_PER_RD_BIO
	struct scrub_page	*pagev[SCRUB_PAGES_PER_WR_BIO];
#else
	struct scrub_page	*pagev[SCRUB_PAGES_PER_RD_BIO];
#endif
	int			page_count;
	int			next_free;
	struct btrfs_work	work;
};

struct scrub_block {
	struct scrub_page	*pagev[SCRUB_MAX_PAGES_PER_BLOCK];
	int			page_count;
	atomic_t		outstanding_pages;
	refcount_t		refs; /* free mem on transition to zero */
	struct scrub_ctx	*sctx;
	struct scrub_parity	*sparity;
	struct {
		unsigned int	header_error:1;
		unsigned int	checksum_error:1;
		unsigned int	no_io_error_seen:1;
		unsigned int	generation_error:1; /* also sets header_error */

		/*
		 * The following is for the data used to check parity,
		 * i.e. for data that has a checksum.
		 */
		unsigned int	data_corrected:1;
	};
	struct btrfs_work	work;
};

/* Used for the chunks with parity stripe such as RAID5/6 */
struct scrub_parity {
	struct scrub_ctx	*sctx;

	struct btrfs_device	*scrub_dev;

	u64			logic_start;

	u64			logic_end;

	int			nsectors;

	u64			stripe_len;

	refcount_t		refs;

	struct list_head	spages;

	/* Work of parity check and repair */
	struct btrfs_work	work;

	/* Mark the parity blocks which have data */
	unsigned long		*dbitmap;

	/*
	 * Mark the parity blocks which have data, but where errors
	 * happened when the data was read or checked
	 */
	unsigned long		*ebitmap;

	unsigned long		bitmap[0];
};

struct scrub_ctx {
	struct scrub_bio	*bios[SCRUB_BIOS_PER_SCTX];
	struct btrfs_fs_info	*fs_info;
	int			first_free;
	int			curr;
	atomic_t		bios_in_flight;
	atomic_t		workers_pending;
	spinlock_t		list_lock;
	wait_queue_head_t	list_wait;
	u16			csum_size;
	struct list_head	csum_list;
	atomic_t		cancel_req;
	int			readonly;
	int			pages_per_rd_bio;

	int			is_dev_replace;

	struct scrub_bio	*wr_curr_bio;
	struct mutex		wr_lock;
	int			pages_per_wr_bio; /* <= SCRUB_PAGES_PER_WR_BIO */
	struct btrfs_device	*wr_tgtdev;
	bool			flush_all_writes;

	/*
	 * statistics
	 */
	struct btrfs_scrub_progress stat;
	spinlock_t		stat_lock;

	/*
	 * Use a ref counter to avoid use-after-free issues. Scrub workers
	 * decrement bios_in_flight and workers_pending and then do a wakeup
	 * on the list_wait wait queue. We must ensure the main scrub task
	 * doesn't free the scrub context before or while the workers are
	 * doing the wakeup() call.
	 */
	refcount_t		refs;
};

struct scrub_warning {
	struct btrfs_path	*path;
	u64			extent_item_size;
	const char		*errstr;
	u64			physical;
	u64			logical;
	struct btrfs_device	*dev;
};

struct full_stripe_lock {
	struct rb_node		node;
	u64			logical;
	u64			refs;
	struct mutex		mutex;
};

static void scrub_pending_bio_inc(struct scrub_ctx *sctx);
static void scrub_pending_bio_dec(struct scrub_ctx *sctx);
static int scrub_handle_errored_block(struct scrub_block *sblock_to_check);
static int scrub_setup_recheck_block(struct scrub_block *original_sblock,
				     struct scrub_block *sblocks_for_recheck);
static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
				struct scrub_block *sblock,
				int retry_failed_mirror);
static void scrub_recheck_block_checksum(struct scrub_block *sblock);
static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
					     struct scrub_block *sblock_good);
static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
					    struct scrub_block *sblock_good,
					    int page_num, int force_write);
static void scrub_write_block_to_dev_replace(struct scrub_block *sblock);
static int scrub_write_page_to_dev_replace(struct scrub_block *sblock,
					   int page_num);
static int scrub_checksum_data(struct scrub_block *sblock);
static int scrub_checksum_tree_block(struct scrub_block *sblock);
static int scrub_checksum_super(struct scrub_block *sblock);
static void scrub_block_get(struct scrub_block *sblock);
static void scrub_block_put(struct scrub_block *sblock);
static void scrub_page_get(struct scrub_page *spage);
static void scrub_page_put(struct scrub_page *spage);
static void scrub_parity_get(struct scrub_parity *sparity);
static void scrub_parity_put(struct scrub_parity *sparity);
static int scrub_add_page_to_rd_bio(struct scrub_ctx *sctx,
				    struct scrub_page *spage);
static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
		       u64 physical, struct btrfs_device *dev, u64 flags,
		       u64 gen, int mirror_num, u8 *csum, int force,
		       u64 physical_for_dev_replace);
static void scrub_bio_end_io(struct bio *bio);
static void scrub_bio_end_io_worker(struct btrfs_work *work);
static void scrub_block_complete(struct scrub_block *sblock);
static void scrub_remap_extent(struct btrfs_fs_info *fs_info,
			       u64 extent_logical, u64 extent_len,
			       u64 *extent_physical,
			       struct btrfs_device **extent_dev,
			       int *extent_mirror_num);
static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
				    struct scrub_page *spage);
static void scrub_wr_submit(struct scrub_ctx *sctx);
static void scrub_wr_bio_end_io(struct bio *bio);
static void scrub_wr_bio_end_io_worker(struct btrfs_work *work);
static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info);
static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info);
static void scrub_put_ctx(struct scrub_ctx *sctx);

static inline int scrub_is_page_on_raid56(struct scrub_page *page)
{
	return page->recover &&
	       (page->recover->bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK);
}

static void scrub_pending_bio_inc(struct scrub_ctx *sctx)
{
	refcount_inc(&sctx->refs);
	atomic_inc(&sctx->bios_in_flight);
}

static void scrub_pending_bio_dec(struct scrub_ctx *sctx)
{
	atomic_dec(&sctx->bios_in_flight);
	wake_up(&sctx->list_wait);
	scrub_put_ctx(sctx);
}

static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
{
	while (atomic_read(&fs_info->scrub_pause_req)) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
			   atomic_read(&fs_info->scrub_pause_req) == 0);
		mutex_lock(&fs_info->scrub_lock);
	}
}

static void scrub_pause_on(struct btrfs_fs_info *fs_info)
{
	atomic_inc(&fs_info->scrubs_paused);
	wake_up(&fs_info->scrub_pause_wait);
}

static void scrub_pause_off(struct btrfs_fs_info *fs_info)
{
	mutex_lock(&fs_info->scrub_lock);
	__scrub_blocked_if_needed(fs_info);
	atomic_dec(&fs_info->scrubs_paused);
	mutex_unlock(&fs_info->scrub_lock);

	wake_up(&fs_info->scrub_pause_wait);
}

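/*
 * Descriptive note on the pause handshake (added for clarity): a task that
 * wants scrub to stop temporarily raises fs_info->scrub_pause_req.
 * scrub_pause_on() advertises this scrub as paused and wakes any waiter,
 * while scrub_pause_off() blocks until the request is dropped before
 * counting itself as running again. The helper below simply pairs the two,
 * giving scrub code a single point at which it may be parked.
 */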
static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
{
	scrub_pause_on(fs_info);
	scrub_pause_off(fs_info);
}

/*
 * Insert new full stripe lock into full stripe locks tree
 *
 * Return pointer to existing or newly inserted full_stripe_lock structure if
 * everything works well.
 * Return ERR_PTR(-ENOMEM) if we failed to allocate memory
 *
 * NOTE: caller must hold full_stripe_locks_root->lock before calling this
 * function
 */
static struct full_stripe_lock *insert_full_stripe_lock(
		struct btrfs_full_stripe_locks_tree *locks_root,
		u64 fstripe_logical)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct full_stripe_lock *entry;
	struct full_stripe_lock *ret;
	unsigned int nofs_flag;

	lockdep_assert_held(&locks_root->lock);

	p = &locks_root->root.rb_node;
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct full_stripe_lock, node);
		if (fstripe_logical < entry->logical) {
			p = &(*p)->rb_left;
		} else if (fstripe_logical > entry->logical) {
			p = &(*p)->rb_right;
		} else {
			entry->refs++;
			return entry;
		}
	}

	/*
	 * Insert new lock.
	 *
	 * We must use GFP_NOFS because the scrub task might be waiting for a
	 * worker task executing this function and in turn a transaction commit
	 * might be waiting for the scrub task to pause (which needs to wait
	 * for all the worker tasks to complete before pausing).
	 */
	nofs_flag = memalloc_nofs_save();
	ret = kmalloc(sizeof(*ret), GFP_KERNEL);
	memalloc_nofs_restore(nofs_flag);
	if (!ret)
		return ERR_PTR(-ENOMEM);
	ret->logical = fstripe_logical;
	ret->refs = 1;
	mutex_init(&ret->mutex);

	rb_link_node(&ret->node, parent, p);
	rb_insert_color(&ret->node, &locks_root->root);
	return ret;
}

/*
 * Search for a full stripe lock of a block group
 *
 * Return pointer to existing full stripe lock if found
 * Return NULL if not found
 */
static struct full_stripe_lock *search_full_stripe_lock(
		struct btrfs_full_stripe_locks_tree *locks_root,
		u64 fstripe_logical)
{
	struct rb_node *node;
	struct full_stripe_lock *entry;

	lockdep_assert_held(&locks_root->lock);

	node = locks_root->root.rb_node;
	while (node) {
		entry = rb_entry(node, struct full_stripe_lock, node);
		if (fstripe_logical < entry->logical)
			node = node->rb_left;
		else if (fstripe_logical > entry->logical)
			node = node->rb_right;
		else
			return entry;
	}
	return NULL;
}

/*
 * Helper to get full stripe logical from a normal bytenr.
 *
 * Caller must ensure @cache is a RAID56 block group.
 */
static u64 get_full_stripe_logical(struct btrfs_block_group_cache *cache,
				   u64 bytenr)
{
	u64 ret;

	/*
	 * Due to chunk item size limit, full stripe length should not be
	 * larger than U32_MAX. Just a sanity check here.
	 */
	WARN_ON_ONCE(cache->full_stripe_len >= U32_MAX);

	/*
	 * round_down() can only handle power of 2, while RAID56 full
	 * stripe length can be 64KiB * n, so we need to manually round down.
	 */
	ret = div64_u64(bytenr - cache->key.objectid, cache->full_stripe_len) *
			cache->full_stripe_len + cache->key.objectid;
	return ret;
}
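
/*
 * Worked example for the manual round-down above (hypothetical numbers,
 * added for clarity): a RAID5 block group starting at key.objectid == 1 GiB
 * with three 64 KiB data stripes has full_stripe_len == 192 KiB, which is
 * not a power of two. For bytenr == 1 GiB + 500 KiB:
 *
 *	(bytenr - objectid) / full_stripe_len = 500 KiB / 192 KiB = 2
 *	2 * 192 KiB + 1 GiB = 1 GiB + 384 KiB
 *
 * so the full stripe containing bytenr starts at 1 GiB + 384 KiB.
 */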

/*
 * Lock a full stripe to avoid concurrency of recovery and read
 *
 * It's only used for profiles with parities (RAID5/6), for other profiles it
 * does nothing.
 *
 * Return 0 if we locked the full stripe covering @bytenr, with a mutex held.
 * The caller must call unlock_full_stripe() in the same context.
 *
 * Return <0 if we encounter an error.
 */
static int lock_full_stripe(struct btrfs_fs_info *fs_info, u64 bytenr,
			    bool *locked_ret)
{
	struct btrfs_block_group_cache *bg_cache;
	struct btrfs_full_stripe_locks_tree *locks_root;
	struct full_stripe_lock *existing;
	u64 fstripe_start;
	int ret = 0;

	*locked_ret = false;
	bg_cache = btrfs_lookup_block_group(fs_info, bytenr);
	if (!bg_cache) {
		ASSERT(0);
		return -ENOENT;
	}

	/* Profiles not based on parity don't need full stripe lock */
	if (!(bg_cache->flags & BTRFS_BLOCK_GROUP_RAID56_MASK))
		goto out;
	locks_root = &bg_cache->full_stripe_locks_root;

	fstripe_start = get_full_stripe_logical(bg_cache, bytenr);

	/* Now insert the full stripe lock */
	mutex_lock(&locks_root->lock);
	existing = insert_full_stripe_lock(locks_root, fstripe_start);
	mutex_unlock(&locks_root->lock);
	if (IS_ERR(existing)) {
		ret = PTR_ERR(existing);
		goto out;
	}
	mutex_lock(&existing->mutex);
	*locked_ret = true;
out:
	btrfs_put_block_group(bg_cache);
	return ret;
}

/*
 * Unlock a full stripe.
 *
 * NOTE: Caller must ensure it's the same context calling corresponding
 * lock_full_stripe().
 *
 * Return 0 if we unlock the full stripe without problem.
 * Return <0 for error.
 */
static int unlock_full_stripe(struct btrfs_fs_info *fs_info, u64 bytenr,
			      bool locked)
{
	struct btrfs_block_group_cache *bg_cache;
	struct btrfs_full_stripe_locks_tree *locks_root;
	struct full_stripe_lock *fstripe_lock;
	u64 fstripe_start;
	bool freeit = false;
	int ret = 0;

	/* If we didn't acquire full stripe lock, no need to continue */
	if (!locked)
		return 0;

	bg_cache = btrfs_lookup_block_group(fs_info, bytenr);
	if (!bg_cache) {
		ASSERT(0);
		return -ENOENT;
	}
	if (!(bg_cache->flags & BTRFS_BLOCK_GROUP_RAID56_MASK))
		goto out;

	locks_root = &bg_cache->full_stripe_locks_root;
	fstripe_start = get_full_stripe_logical(bg_cache, bytenr);

	mutex_lock(&locks_root->lock);
	fstripe_lock = search_full_stripe_lock(locks_root, fstripe_start);
	/* Unpaired unlock_full_stripe() detected */
	if (!fstripe_lock) {
		WARN_ON(1);
		ret = -ENOENT;
		mutex_unlock(&locks_root->lock);
		goto out;
	}

	if (fstripe_lock->refs == 0) {
		WARN_ON(1);
		btrfs_warn(fs_info, "full stripe lock at %llu refcount underflow",
			   fstripe_lock->logical);
	} else {
		fstripe_lock->refs--;
	}

	if (fstripe_lock->refs == 0) {
		rb_erase(&fstripe_lock->node, &locks_root->root);
		freeit = true;
	}
	mutex_unlock(&locks_root->lock);

	mutex_unlock(&fstripe_lock->mutex);
	if (freeit)
		kfree(fstripe_lock);
out:
	btrfs_put_block_group(bg_cache);
	return ret;
}
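
/*
 * Usage sketch (illustrative only, mirroring how scrub_handle_errored_block
 * below uses the pair):
 *
 *	bool locked;
 *	int ret;
 *
 *	ret = lock_full_stripe(fs_info, logical, &locked);
 *	if (ret < 0)
 *		return ret;
 *	... recheck/repair the block covering @logical ...
 *	ret = unlock_full_stripe(fs_info, logical, locked);
 *
 * For non-RAID56 block groups both calls degrade to cheap no-ops (locked
 * stays false), so callers need not special-case the profile.
 */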

static void scrub_free_csums(struct scrub_ctx *sctx)
{
	while (!list_empty(&sctx->csum_list)) {
		struct btrfs_ordered_sum *sum;
		sum = list_first_entry(&sctx->csum_list,
				       struct btrfs_ordered_sum, list);
		list_del(&sum->list);
		kfree(sum);
	}
}

static noinline_for_stack void scrub_free_ctx(struct scrub_ctx *sctx)
{
	int i;

	if (!sctx)
		return;

	/* this can happen when scrub is cancelled */
	if (sctx->curr != -1) {
		struct scrub_bio *sbio = sctx->bios[sctx->curr];

		for (i = 0; i < sbio->page_count; i++) {
			WARN_ON(!sbio->pagev[i]->page);
			scrub_block_put(sbio->pagev[i]->sblock);
		}
		bio_put(sbio->bio);
	}

	for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) {
		struct scrub_bio *sbio = sctx->bios[i];

		if (!sbio)
			break;
		kfree(sbio);
	}

	kfree(sctx->wr_curr_bio);
	scrub_free_csums(sctx);
	kfree(sctx);
}

static void scrub_put_ctx(struct scrub_ctx *sctx)
{
	if (refcount_dec_and_test(&sctx->refs))
		scrub_free_ctx(sctx);
}

static noinline_for_stack
struct scrub_ctx *scrub_setup_ctx(struct btrfs_device *dev, int is_dev_replace)
{
	struct scrub_ctx *sctx;
	int i;
	struct btrfs_fs_info *fs_info = dev->fs_info;

	sctx = kzalloc(sizeof(*sctx), GFP_KERNEL);
	if (!sctx)
		goto nomem;
	refcount_set(&sctx->refs, 1);
	sctx->is_dev_replace = is_dev_replace;
	sctx->pages_per_rd_bio = SCRUB_PAGES_PER_RD_BIO;
	sctx->curr = -1;
	sctx->fs_info = dev->fs_info;
	for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) {
		struct scrub_bio *sbio;

		sbio = kzalloc(sizeof(*sbio), GFP_KERNEL);
		if (!sbio)
			goto nomem;
		sctx->bios[i] = sbio;

		sbio->index = i;
		sbio->sctx = sctx;
		sbio->page_count = 0;
		btrfs_init_work(&sbio->work, btrfs_scrub_helper,
				scrub_bio_end_io_worker, NULL, NULL);

		if (i != SCRUB_BIOS_PER_SCTX - 1)
			sctx->bios[i]->next_free = i + 1;
		else
			sctx->bios[i]->next_free = -1;
	}
	sctx->first_free = 0;
	atomic_set(&sctx->bios_in_flight, 0);
	atomic_set(&sctx->workers_pending, 0);
	atomic_set(&sctx->cancel_req, 0);
	sctx->csum_size = btrfs_super_csum_size(fs_info->super_copy);
	INIT_LIST_HEAD(&sctx->csum_list);

	spin_lock_init(&sctx->list_lock);
	spin_lock_init(&sctx->stat_lock);
	init_waitqueue_head(&sctx->list_wait);

	WARN_ON(sctx->wr_curr_bio != NULL);
	mutex_init(&sctx->wr_lock);
	sctx->wr_curr_bio = NULL;
	if (is_dev_replace) {
		WARN_ON(!fs_info->dev_replace.tgtdev);
		sctx->pages_per_wr_bio = SCRUB_PAGES_PER_WR_BIO;
		sctx->wr_tgtdev = fs_info->dev_replace.tgtdev;
		sctx->flush_all_writes = false;
	}

	return sctx;

nomem:
	scrub_free_ctx(sctx);
	return ERR_PTR(-ENOMEM);
}
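
/*
 * Note on the bios[] array initialized above (added for clarity): the
 * next_free fields chain the entries into a free list of array indices,
 * 0 -> 1 -> ... -> SCRUB_BIOS_PER_SCTX - 1 -> -1, with sctx->first_free
 * holding the head. sctx->curr == -1 means no scrub_bio is currently
 * being filled.
 */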

static int scrub_print_warning_inode(u64 inum, u64 offset, u64 root,
				     void *warn_ctx)
{
	u64 isize;
	u32 nlink;
	int ret;
	int i;
	unsigned nofs_flag;
	struct extent_buffer *eb;
	struct btrfs_inode_item *inode_item;
	struct scrub_warning *swarn = warn_ctx;
	struct btrfs_fs_info *fs_info = swarn->dev->fs_info;
	struct inode_fs_paths *ipath = NULL;
	struct btrfs_root *local_root;
	struct btrfs_key root_key;
	struct btrfs_key key;

	root_key.objectid = root;
	root_key.type = BTRFS_ROOT_ITEM_KEY;
	root_key.offset = (u64)-1;
	local_root = btrfs_read_fs_root_no_name(fs_info, &root_key);
	if (IS_ERR(local_root)) {
		ret = PTR_ERR(local_root);
		goto err;
	}

	/*
	 * This makes the path point to (inum INODE_ITEM ioff)
	 */
	key.objectid = inum;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;

	ret = btrfs_search_slot(NULL, local_root, &key, swarn->path, 0, 0);
	if (ret) {
		btrfs_release_path(swarn->path);
		goto err;
	}

	eb = swarn->path->nodes[0];
	inode_item = btrfs_item_ptr(eb, swarn->path->slots[0],
					struct btrfs_inode_item);
	isize = btrfs_inode_size(eb, inode_item);
	nlink = btrfs_inode_nlink(eb, inode_item);
	btrfs_release_path(swarn->path);

	/*
	 * init_ipath might indirectly call vmalloc, or use GFP_KERNEL. Scrub
	 * uses GFP_NOFS in this context, so we keep it consistent but it does
	 * not seem to be strictly necessary.
	 */
	nofs_flag = memalloc_nofs_save();
	ipath = init_ipath(4096, local_root, swarn->path);
	memalloc_nofs_restore(nofs_flag);
	if (IS_ERR(ipath)) {
		ret = PTR_ERR(ipath);
		ipath = NULL;
		goto err;
	}
	ret = paths_from_inode(inum, ipath);

	if (ret < 0)
		goto err;

	/*
	 * We deliberately ignore the fact that ipath might have been too
	 * small to hold all of the paths here
	 */
	for (i = 0; i < ipath->fspath->elem_cnt; ++i)
		btrfs_warn_in_rcu(fs_info,
"%s at logical %llu on dev %s, physical %llu, root %llu, inode %llu, offset %llu, length %llu, links %u (path: %s)",
				  swarn->errstr, swarn->logical,
				  rcu_str_deref(swarn->dev->name),
				  swarn->physical,
				  root, inum, offset,
				  min(isize - offset, (u64)PAGE_SIZE), nlink,
				  (char *)(unsigned long)ipath->fspath->val[i]);

	free_ipath(ipath);
	return 0;

err:
	btrfs_warn_in_rcu(fs_info,
			  "%s at logical %llu on dev %s, physical %llu, root %llu, inode %llu, offset %llu: path resolving failed with ret=%d",
			  swarn->errstr, swarn->logical,
			  rcu_str_deref(swarn->dev->name),
			  swarn->physical,
			  root, inum, offset, ret);

	free_ipath(ipath);
	return 0;
}

static void scrub_print_warning(const char *errstr, struct scrub_block *sblock)
{
	struct btrfs_device *dev;
	struct btrfs_fs_info *fs_info;
	struct btrfs_path *path;
	struct btrfs_key found_key;
	struct extent_buffer *eb;
	struct btrfs_extent_item *ei;
	struct scrub_warning swarn;
	unsigned long ptr = 0;
	u64 extent_item_pos;
	u64 flags = 0;
	u64 ref_root;
	u32 item_size;
	u8 ref_level = 0;
	int ret;

	WARN_ON(sblock->page_count < 1);
	dev = sblock->pagev[0]->dev;
	fs_info = sblock->sctx->fs_info;

	path = btrfs_alloc_path();
	if (!path)
		return;

	swarn.physical = sblock->pagev[0]->physical;
	swarn.logical = sblock->pagev[0]->logical;
	swarn.errstr = errstr;
	swarn.dev = NULL;

	ret = extent_from_logical(fs_info, swarn.logical, path, &found_key,
				  &flags);
	if (ret < 0)
		goto out;

	extent_item_pos = swarn.logical - found_key.objectid;
	swarn.extent_item_size = found_key.offset;

	eb = path->nodes[0];
	ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
	item_size = btrfs_item_size_nr(eb, path->slots[0]);

	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		do {
			ret = tree_backref_for_extent(&ptr, eb, &found_key, ei,
						      item_size, &ref_root,
						      &ref_level);
			btrfs_warn_in_rcu(fs_info,
"%s at logical %llu on dev %s, physical %llu: metadata %s (level %d) in tree %llu",
				errstr, swarn.logical,
				rcu_str_deref(dev->name),
				swarn.physical,
				ref_level ? "node" : "leaf",
				ret < 0 ? -1 : ref_level,
				ret < 0 ? -1 : ref_root);
		} while (ret != 1);
		btrfs_release_path(path);
	} else {
		btrfs_release_path(path);
		swarn.path = path;
		swarn.dev = dev;
		iterate_extent_inodes(fs_info, found_key.objectid,
				      extent_item_pos, 1,
				      scrub_print_warning_inode, &swarn, false);
	}

out:
	btrfs_free_path(path);
}

static inline void scrub_get_recover(struct scrub_recover *recover)
{
	refcount_inc(&recover->refs);
}

static inline void scrub_put_recover(struct btrfs_fs_info *fs_info,
				     struct scrub_recover *recover)
{
	if (refcount_dec_and_test(&recover->refs)) {
		btrfs_bio_counter_dec(fs_info);
		btrfs_put_bbio(recover->bbio);
		kfree(recover);
	}
}

/*
 * scrub_handle_errored_block gets called when either verification of the
 * pages failed or the bio failed to read, e.g. with EIO. In the latter
 * case, this function handles all pages in the bio, even though only one
 * may be bad.
 * The goal of this function is to repair the errored block by using the
 * contents of one of the mirrors.
 */
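/*
 * High-level flow (summary added for clarity, derived from the comments
 * inside the function):
 *
 *  1. Serialize against RAID56 recovery with lock_full_stripe().
 *  2. Re-read the failed mirror page by page; if the errors are gone,
 *     count the block as unverified and bail out.
 *  3. Otherwise look for a mirror that is free of I/O and checksum
 *     errors and repair the whole block from it.
 *  4. Failing that, stitch the block together page by page from
 *     whichever mirrors have a good copy of each page, then verify the
 *     checksum of the result.
 *  5. Account the outcome (corrected/uncorrectable) and, for dev
 *     replace, write the repaired content to the target device.
 */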
static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
{
	struct scrub_ctx *sctx = sblock_to_check->sctx;
	struct btrfs_device *dev;
	struct btrfs_fs_info *fs_info;
	u64 logical;
	unsigned int failed_mirror_index;
	unsigned int is_metadata;
	unsigned int have_csum;
	struct scrub_block *sblocks_for_recheck; /* holds one for each mirror */
	struct scrub_block *sblock_bad;
	int ret;
	int mirror_index;
	int page_num;
	int success;
	bool full_stripe_locked;
	static DEFINE_RATELIMIT_STATE(_rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);

	BUG_ON(sblock_to_check->page_count < 1);
	fs_info = sctx->fs_info;
	if (sblock_to_check->pagev[0]->flags & BTRFS_EXTENT_FLAG_SUPER) {
		/*
		 * If we find an error in a super block, we just report it.
		 * They will get written with the next transaction commit
		 * anyway
		 */
		spin_lock(&sctx->stat_lock);
		++sctx->stat.super_errors;
		spin_unlock(&sctx->stat_lock);
		return 0;
	}
	logical = sblock_to_check->pagev[0]->logical;
	BUG_ON(sblock_to_check->pagev[0]->mirror_num < 1);
	failed_mirror_index = sblock_to_check->pagev[0]->mirror_num - 1;
	is_metadata = !(sblock_to_check->pagev[0]->flags &
			BTRFS_EXTENT_FLAG_DATA);
	have_csum = sblock_to_check->pagev[0]->have_csum;
	dev = sblock_to_check->pagev[0]->dev;

	/*
	 * For RAID5/6, a race can happen with the scrub thread of a
	 * different device: on data corruption, the parity and data
	 * threads will both try to recover the data. The race can lead to
	 * doubly counted csum errors, or even to unrecoverable errors.
	 */
	ret = lock_full_stripe(fs_info, logical, &full_stripe_locked);
	if (ret < 0) {
		spin_lock(&sctx->stat_lock);
		if (ret == -ENOMEM)
			sctx->stat.malloc_errors++;
		sctx->stat.read_errors++;
		sctx->stat.uncorrectable_errors++;
		spin_unlock(&sctx->stat_lock);
		return ret;
	}

	/*
	 * Read all mirrors one after the other. This includes re-reading
	 * the extent or metadata block that failed (that was the cause
	 * that this fixup code is called), another time, page by page this
	 * time in order to know which pages caused I/O errors and which
	 * ones are good (for all mirrors).
	 * It is the goal to handle the situation when more than one mirror
	 * contains I/O errors, but the errors do not overlap, i.e. the data
	 * can be repaired by selecting the pages from those mirrors without
	 * I/O error on the particular pages. One example (with blocks >=
	 * 2 * PAGE_SIZE) would be that mirror #1 has an I/O error on the
	 * first page, the second page is good, and mirror #2 has an I/O
	 * error on the second page, but the first page is good.
	 * Then the first page of the first mirror can be repaired by taking
	 * the first page of the second mirror, and the second page of the
	 * second mirror can be repaired by copying the contents of the 2nd
	 * page of the 1st mirror.
	 * One more note: if the pages of one mirror contain I/O errors, the
	 * checksum cannot be verified. In order to get the best data for
	 * repairing, the first attempt is to find a mirror without I/O
	 * errors and with a validated checksum. Only if this is not
	 * possible, the pages are picked from mirrors with I/O errors
	 * without considering the checksum.
	 * If the latter is the case, at the end, the checksum of the
	 * repaired area is verified in order to correctly maintain the
	 * statistics.
	 */

	sblocks_for_recheck = kcalloc(BTRFS_MAX_MIRRORS,
				      sizeof(*sblocks_for_recheck), GFP_NOFS);
	if (!sblocks_for_recheck) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.malloc_errors++;
		sctx->stat.read_errors++;
		sctx->stat.uncorrectable_errors++;
		spin_unlock(&sctx->stat_lock);
		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
		goto out;
	}

	/* setup the context, map the logical blocks and alloc the pages */
	ret = scrub_setup_recheck_block(sblock_to_check, sblocks_for_recheck);
	if (ret) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.read_errors++;
		sctx->stat.uncorrectable_errors++;
		spin_unlock(&sctx->stat_lock);
		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
		goto out;
	}
	BUG_ON(failed_mirror_index >= BTRFS_MAX_MIRRORS);
	sblock_bad = sblocks_for_recheck + failed_mirror_index;

	/* build and submit the bios for the failed mirror, check checksums */
	scrub_recheck_block(fs_info, sblock_bad, 1);

	if (!sblock_bad->header_error && !sblock_bad->checksum_error &&
	    sblock_bad->no_io_error_seen) {
		/*
		 * The error disappeared after reading page by page, or
		 * the area was part of a huge bio and other parts of the
		 * bio caused I/O errors, or the block layer merged several
		 * read requests into one and the error is caused by a
		 * different bio (usually one of the two latter cases is
		 * the cause)
		 */
		spin_lock(&sctx->stat_lock);
		sctx->stat.unverified_errors++;
		sblock_to_check->data_corrected = 1;
		spin_unlock(&sctx->stat_lock);

		if (sctx->is_dev_replace)
			scrub_write_block_to_dev_replace(sblock_bad);
		goto out;
	}

	if (!sblock_bad->no_io_error_seen) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.read_errors++;
		spin_unlock(&sctx->stat_lock);
		if (__ratelimit(&_rs))
			scrub_print_warning("i/o error", sblock_to_check);
		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
	} else if (sblock_bad->checksum_error) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.csum_errors++;
		spin_unlock(&sctx->stat_lock);
		if (__ratelimit(&_rs))
			scrub_print_warning("checksum error", sblock_to_check);
		btrfs_dev_stat_inc_and_print(dev,
					     BTRFS_DEV_STAT_CORRUPTION_ERRS);
	} else if (sblock_bad->header_error) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.verify_errors++;
		spin_unlock(&sctx->stat_lock);
		if (__ratelimit(&_rs))
			scrub_print_warning("checksum/header error",
					    sblock_to_check);
		if (sblock_bad->generation_error)
			btrfs_dev_stat_inc_and_print(dev,
				BTRFS_DEV_STAT_GENERATION_ERRS);
		else
			btrfs_dev_stat_inc_and_print(dev,
				BTRFS_DEV_STAT_CORRUPTION_ERRS);
	}

	if (sctx->readonly) {
		ASSERT(!sctx->is_dev_replace);
		goto out;
	}

	/*
	 * Now build and submit the bios for the other mirrors, check
	 * checksums.
	 * First try to pick the mirror which is completely without I/O
	 * errors and also does not have a checksum error.
	 * If one is found, and if a checksum is present, the full block
	 * that is known to contain an error is rewritten. Afterwards
	 * the block is known to be corrected.
	 * If a mirror is found which is completely correct, and no
	 * checksum is present, only those pages are rewritten that had
	 * an I/O error in the block to be repaired, since it cannot be
	 * determined which copy of the other pages is better (and it
	 * could happen otherwise that a correct page would be
	 * overwritten by a bad one).
	 */
	for (mirror_index = 0; ;mirror_index++) {
		struct scrub_block *sblock_other;

		if (mirror_index == failed_mirror_index)
			continue;

		/* raid56's mirror can be more than BTRFS_MAX_MIRRORS */
		if (!scrub_is_page_on_raid56(sblock_bad->pagev[0])) {
			if (mirror_index >= BTRFS_MAX_MIRRORS)
				break;
			if (!sblocks_for_recheck[mirror_index].page_count)
				break;

			sblock_other = sblocks_for_recheck + mirror_index;
		} else {
			struct scrub_recover *r = sblock_bad->pagev[0]->recover;
			int max_allowed = r->bbio->num_stripes -
						r->bbio->num_tgtdevs;

			if (mirror_index >= max_allowed)
				break;
			if (!sblocks_for_recheck[1].page_count)
				break;

			ASSERT(failed_mirror_index == 0);
			sblock_other = sblocks_for_recheck + 1;
			sblock_other->pagev[0]->mirror_num = 1 + mirror_index;
		}

		/* build and submit the bios, check checksums */
		scrub_recheck_block(fs_info, sblock_other, 0);

		if (!sblock_other->header_error &&
		    !sblock_other->checksum_error &&
		    sblock_other->no_io_error_seen) {
			if (sctx->is_dev_replace) {
				scrub_write_block_to_dev_replace(sblock_other);
				goto corrected_error;
			} else {
				ret = scrub_repair_block_from_good_copy(
						sblock_bad, sblock_other);
				if (!ret)
					goto corrected_error;
			}
		}
	}

	if (sblock_bad->no_io_error_seen && !sctx->is_dev_replace)
		goto did_not_correct_error;

	/*
	 * In case of I/O errors in the area that is supposed to be
	 * repaired, continue by picking good copies of those pages.
	 * Select the good pages from mirrors to rewrite bad pages from
	 * the area to fix. Afterwards verify the checksum of the block
	 * that is supposed to be repaired. This verification step is
	 * only done for the purpose of statistic counting and for the
	 * final scrub report on whether errors remain.
	 * A perfect algorithm could make use of the checksum and try
	 * all possible combinations of pages from the different mirrors
	 * until the checksum verification succeeds. For example, when
	 * the 2nd page of mirror #1 faces I/O errors, and the 2nd page
	 * of mirror #2 is readable but the final checksum test fails,
	 * then the 2nd page of mirror #3 could be tried, to check whether
	 * then the final checksum succeeds. But this would be a rare
	 * exception and is therefore not implemented. At least it is
	 * avoided that the good copy is overwritten.
	 * A more useful improvement would be to pick the sectors
	 * without I/O error based on sector sizes (512 bytes on legacy
	 * disks) instead of on PAGE_SIZE. Then maybe 512 bytes of one
	 * mirror could be repaired by taking 512 bytes of a different
	 * mirror, even if other 512 byte sectors in the same PAGE_SIZE
	 * area are unreadable.
	 */
	success = 1;
	for (page_num = 0; page_num < sblock_bad->page_count;
	     page_num++) {
		struct scrub_page *page_bad = sblock_bad->pagev[page_num];
		struct scrub_block *sblock_other = NULL;

		/* skip no-io-error page in scrub */
		if (!page_bad->io_error && !sctx->is_dev_replace)
			continue;

		if (scrub_is_page_on_raid56(sblock_bad->pagev[0])) {
			/*
			 * In case of dev replace, if raid56 rebuild process
			 * didn't work out correct data, then copy the content
			 * in sblock_bad to make sure target device is identical
			 * to source device, instead of writing garbage data in
			 * sblock_for_recheck array to target device.
			 */
			sblock_other = NULL;
		} else if (page_bad->io_error) {
			/* try to find no-io-error page in mirrors */
			for (mirror_index = 0;
			     mirror_index < BTRFS_MAX_MIRRORS &&
			     sblocks_for_recheck[mirror_index].page_count > 0;
			     mirror_index++) {
				if (!sblocks_for_recheck[mirror_index].
				    pagev[page_num]->io_error) {
					sblock_other = sblocks_for_recheck +
						       mirror_index;
					break;
				}
			}
			if (!sblock_other)
				success = 0;
		}

		if (sctx->is_dev_replace) {
			/*
			 * Did not find a mirror to fetch the page from.
			 * scrub_write_page_to_dev_replace() handles this
			 * case (page->io_error), by filling the block with
			 * zeros before submitting the write request
			 */
			if (!sblock_other)
				sblock_other = sblock_bad;

			if (scrub_write_page_to_dev_replace(sblock_other,
							    page_num) != 0) {
				atomic64_inc(
					&fs_info->dev_replace.num_write_errors);
				success = 0;
			}
		} else if (sblock_other) {
			ret = scrub_repair_page_from_good_copy(sblock_bad,
							       sblock_other,
							       page_num, 0);
			if (0 == ret)
				page_bad->io_error = 0;
			else
				success = 0;
		}
	}

Zhao Leib968fed2015-01-20 15:11:41 +08001152 if (success && !sctx->is_dev_replace) {
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001153 if (is_metadata || have_csum) {
1154 /*
1155 * need to verify the checksum now that all
1156 * sectors on disk are repaired (the write
1157 * request for data to be repaired is on its way).
1158 * Just be lazy and use scrub_recheck_block()
1159 * which re-reads the data before the checksum
1160 * is verified, but most likely the data comes out
1161 * of the page cache.
1162 */
Zhao Leiaffe4a52015-08-24 21:32:06 +08001163 scrub_recheck_block(fs_info, sblock_bad, 1);
Stefan Behrens34f5c8e2012-11-02 16:16:26 +01001164 if (!sblock_bad->header_error &&
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001165 !sblock_bad->checksum_error &&
1166 sblock_bad->no_io_error_seen)
1167 goto corrected_error;
1168 else
1169 goto did_not_correct_error;
1170 } else {
1171corrected_error:
Stefan Behrensd9d181c2012-11-02 09:58:09 +01001172 spin_lock(&sctx->stat_lock);
1173 sctx->stat.corrected_errors++;
Miao Xie5a6ac9e2014-11-06 17:20:58 +08001174 sblock_to_check->data_corrected = 1;
Stefan Behrensd9d181c2012-11-02 09:58:09 +01001175 spin_unlock(&sctx->stat_lock);
David Sterbab14af3b2015-10-08 10:43:10 +02001176 btrfs_err_rl_in_rcu(fs_info,
1177 "fixed up error at logical %llu on dev %s",
Geert Uytterhoevenc1c9ff72013-08-20 13:20:07 +02001178 logical, rcu_str_deref(dev->name));
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001179 }
1180 } else {
1181did_not_correct_error:
Stefan Behrensd9d181c2012-11-02 09:58:09 +01001182 spin_lock(&sctx->stat_lock);
1183 sctx->stat.uncorrectable_errors++;
1184 spin_unlock(&sctx->stat_lock);
David Sterbab14af3b2015-10-08 10:43:10 +02001185 btrfs_err_rl_in_rcu(fs_info,
1186 "unable to fixup (regular) error at logical %llu on dev %s",
Geert Uytterhoevenc1c9ff72013-08-20 13:20:07 +02001187 logical, rcu_str_deref(dev->name));
Arne Jansena2de7332011-03-08 14:14:00 +01001188 }
1189
1190out:
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001191 if (sblocks_for_recheck) {
1192 for (mirror_index = 0; mirror_index < BTRFS_MAX_MIRRORS;
1193 mirror_index++) {
1194 struct scrub_block *sblock = sblocks_for_recheck +
1195 mirror_index;
Miao Xieaf8e2d12014-10-23 14:42:50 +08001196 struct scrub_recover *recover;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001197 int page_index;
1198
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001199 for (page_index = 0; page_index < sblock->page_count;
1200 page_index++) {
1201 sblock->pagev[page_index]->sblock = NULL;
Miao Xieaf8e2d12014-10-23 14:42:50 +08001202 recover = sblock->pagev[page_index]->recover;
1203 if (recover) {
Qu Wenruoe501bfe2017-03-29 09:33:22 +08001204 scrub_put_recover(fs_info, recover);
Miao Xieaf8e2d12014-10-23 14:42:50 +08001205 sblock->pagev[page_index]->recover =
1206 NULL;
1207 }
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001208 scrub_page_put(sblock->pagev[page_index]);
1209 }
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001210 }
1211 kfree(sblocks_for_recheck);
1212 }
1213
Qu Wenruo28d70e22017-04-14 08:35:55 +08001214 ret = unlock_full_stripe(fs_info, logical, full_stripe_locked);
1215 if (ret < 0)
1216 return ret;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001217 return 0;
Arne Jansena2de7332011-03-08 14:14:00 +01001218}
1219
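/*
 * Number of ways a block can be fetched for recheck: on RAID5 it can be
 * read directly or rebuilt from parity (2), RAID6 adds one more
 * reconstruction (3); other profiles expose one copy per stripe.
 */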
static inline int scrub_nr_raid_mirrors(struct btrfs_bio *bbio)
{
	if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID5)
		return 2;
	else if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID6)
		return 3;
	else
		return (int)bbio->num_stripes;
}

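/*
 * Map a logical address to the stripe it lives in.  For RAID5/6 the
 * raid_map is searched for the data stripe covering @logical (P/Q
 * stripes are skipped); for all other profiles the mirror number is
 * used as the stripe index directly.
 */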
static inline void scrub_stripe_index_and_offset(u64 logical, u64 map_type,
						 u64 *raid_map,
						 u64 mapped_length,
						 int nstripes, int mirror,
						 int *stripe_index,
						 u64 *stripe_offset)
{
	int i;

	if (map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
		/* RAID5/6 */
		for (i = 0; i < nstripes; i++) {
			if (raid_map[i] == RAID6_Q_STRIPE ||
			    raid_map[i] == RAID5_P_STRIPE)
				continue;

			if (logical >= raid_map[i] &&
			    logical < raid_map[i] + mapped_length)
				break;
		}

		*stripe_index = i;
		*stripe_offset = logical - raid_map[i];
	} else {
		/* The other RAID type */
		*stripe_index = mirror;
		*stripe_offset = 0;
	}
}

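/*
 * Build one scrub_block per mirror for the block that failed, page by
 * page, so that the recheck code can read each mirror independently.
 * The pages borrow the checksum of the original block's first page;
 * each page takes a ref on the scrub_recover that keeps the btrfs_bio
 * mapping alive.
 */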
static int scrub_setup_recheck_block(struct scrub_block *original_sblock,
				     struct scrub_block *sblocks_for_recheck)
{
	struct scrub_ctx *sctx = original_sblock->sctx;
	struct btrfs_fs_info *fs_info = sctx->fs_info;
	u64 length = original_sblock->page_count * PAGE_SIZE;
	u64 logical = original_sblock->pagev[0]->logical;
	u64 generation = original_sblock->pagev[0]->generation;
	u64 flags = original_sblock->pagev[0]->flags;
	u64 have_csum = original_sblock->pagev[0]->have_csum;
	struct scrub_recover *recover;
	struct btrfs_bio *bbio;
	u64 sublen;
	u64 mapped_length;
	u64 stripe_offset;
	int stripe_index;
	int page_index = 0;
	int mirror_index;
	int nmirrors;
	int ret;

	/*
	 * note: the two members refs and outstanding_pages
	 * are not used (and not set) in the blocks that are used for
	 * the recheck procedure
	 */

	while (length > 0) {
		sublen = min_t(u64, length, PAGE_SIZE);
		mapped_length = sublen;
		bbio = NULL;

		/*
		 * with a length of PAGE_SIZE, each returned stripe
		 * represents one mirror
		 */
		btrfs_bio_counter_inc_blocked(fs_info);
		ret = btrfs_map_sblock(fs_info, BTRFS_MAP_GET_READ_MIRRORS,
				logical, &mapped_length, &bbio);
		if (ret || !bbio || mapped_length < sublen) {
			btrfs_put_bbio(bbio);
			btrfs_bio_counter_dec(fs_info);
			return -EIO;
		}

		recover = kzalloc(sizeof(struct scrub_recover), GFP_NOFS);
		if (!recover) {
			btrfs_put_bbio(bbio);
			btrfs_bio_counter_dec(fs_info);
			return -ENOMEM;
		}

		refcount_set(&recover->refs, 1);
		recover->bbio = bbio;
		recover->map_length = mapped_length;

		BUG_ON(page_index >= SCRUB_MAX_PAGES_PER_BLOCK);

		nmirrors = min(scrub_nr_raid_mirrors(bbio), BTRFS_MAX_MIRRORS);

		for (mirror_index = 0; mirror_index < nmirrors;
		     mirror_index++) {
			struct scrub_block *sblock;
			struct scrub_page *page;

			sblock = sblocks_for_recheck + mirror_index;
			sblock->sctx = sctx;

			page = kzalloc(sizeof(*page), GFP_NOFS);
			if (!page) {
leave_nomem:
				spin_lock(&sctx->stat_lock);
				sctx->stat.malloc_errors++;
				spin_unlock(&sctx->stat_lock);
				scrub_put_recover(fs_info, recover);
				return -ENOMEM;
			}
			scrub_page_get(page);
			sblock->pagev[page_index] = page;
			page->sblock = sblock;
			page->flags = flags;
			page->generation = generation;
			page->logical = logical;
			page->have_csum = have_csum;
			if (have_csum)
				memcpy(page->csum,
				       original_sblock->pagev[0]->csum,
				       sctx->csum_size);

			scrub_stripe_index_and_offset(logical,
						      bbio->map_type,
						      bbio->raid_map,
						      mapped_length,
						      bbio->num_stripes -
						      bbio->num_tgtdevs,
						      mirror_index,
						      &stripe_index,
						      &stripe_offset);
			page->physical = bbio->stripes[stripe_index].physical +
					 stripe_offset;
			page->dev = bbio->stripes[stripe_index].dev;

			BUG_ON(page_index >= original_sblock->page_count);
			page->physical_for_dev_replace =
				original_sblock->pagev[page_index]->
				physical_for_dev_replace;
			/* for missing devices, dev->bdev is NULL */
			page->mirror_num = mirror_index + 1;
			sblock->page_count++;
			page->page = alloc_page(GFP_NOFS);
			if (!page->page)
				goto leave_nomem;

			scrub_get_recover(recover);
			page->recover = recover;
		}
		scrub_put_recover(fs_info, recover);
		length -= sublen;
		logical += sublen;
		page_index++;
	}

	return 0;
}

static void scrub_bio_wait_endio(struct bio *bio)
{
	complete(bio->bi_private);
}

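/*
 * Submit a read through the RAID5/6 recovery path and wait for it to
 * complete.  The mirror number of the block's first page selects which
 * reconstruction is attempted.
 */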
static int scrub_submit_raid56_bio_wait(struct btrfs_fs_info *fs_info,
					struct bio *bio,
					struct scrub_page *page)
{
	DECLARE_COMPLETION_ONSTACK(done);
	int ret;
	int mirror_num;

	bio->bi_iter.bi_sector = page->logical >> 9;
	bio->bi_private = &done;
	bio->bi_end_io = scrub_bio_wait_endio;

	mirror_num = page->sblock->pagev[0]->mirror_num;
	ret = raid56_parity_recover(fs_info, bio, page->recover->bbio,
				    page->recover->map_length,
				    mirror_num, 0);
	if (ret)
		return ret;

	wait_for_completion_io(&done);
	return blk_status_to_errno(bio->bi_status);
}

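/*
 * Re-read a whole block through the RAID5/6 recovery path with a single
 * bio.  On any failure every page of the block is marked as an I/O
 * error; on success the checksums are verified.
 */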
static void scrub_recheck_block_on_raid56(struct btrfs_fs_info *fs_info,
					  struct scrub_block *sblock)
{
	struct scrub_page *first_page = sblock->pagev[0];
	struct bio *bio;
	int page_num;

	/* All pages in sblock belong to the same stripe on the same device. */
	ASSERT(first_page->dev);
	if (!first_page->dev->bdev)
		goto out;

	bio = btrfs_io_bio_alloc(BIO_MAX_PAGES);
	bio_set_dev(bio, first_page->dev->bdev);

	for (page_num = 0; page_num < sblock->page_count; page_num++) {
		struct scrub_page *page = sblock->pagev[page_num];

		WARN_ON(!page->page);
		bio_add_page(bio, page->page, PAGE_SIZE, 0);
	}

	if (scrub_submit_raid56_bio_wait(fs_info, bio, first_page)) {
		bio_put(bio);
		goto out;
	}

	bio_put(bio);

	scrub_recheck_block_checksum(sblock);

	return;
out:
	for (page_num = 0; page_num < sblock->page_count; page_num++)
		sblock->pagev[page_num]->io_error = 1;

	sblock->no_io_error_seen = 0;
}

/*
 * this function will check the on disk data for checksum errors, header
 * errors and read I/O errors. If any I/O errors happen, the exact pages
 * which are errored are marked as being bad. The goal is to enable scrub
 * to take those pages that are not errored from all the mirrors so that
 * the pages that are errored in the just handled mirror can be repaired.
 */
static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
				struct scrub_block *sblock,
				int retry_failed_mirror)
{
	int page_num;

	sblock->no_io_error_seen = 1;

	/* short cut for raid56 */
	if (!retry_failed_mirror && scrub_is_page_on_raid56(sblock->pagev[0]))
		return scrub_recheck_block_on_raid56(fs_info, sblock);

	for (page_num = 0; page_num < sblock->page_count; page_num++) {
		struct bio *bio;
		struct scrub_page *page = sblock->pagev[page_num];

		if (page->dev->bdev == NULL) {
			page->io_error = 1;
			sblock->no_io_error_seen = 0;
			continue;
		}

		WARN_ON(!page->page);
		bio = btrfs_io_bio_alloc(1);
		bio_set_dev(bio, page->dev->bdev);

		bio_add_page(bio, page->page, PAGE_SIZE, 0);
		bio->bi_iter.bi_sector = page->physical >> 9;
		bio->bi_opf = REQ_OP_READ;

		if (btrfsic_submit_bio_wait(bio)) {
			page->io_error = 1;
			sblock->no_io_error_seen = 0;
		}

		bio_put(bio);
	}

	if (sblock->no_io_error_seen)
		scrub_recheck_block_checksum(sblock);
}

static inline int scrub_check_fsid(u8 fsid[],
				   struct scrub_page *spage)
{
	struct btrfs_fs_devices *fs_devices = spage->dev->fs_devices;
	int ret;

	ret = memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
	return !ret;
}

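/*
 * Reset the block's error flags and re-verify either the data checksum
 * or the tree block, depending on the extent type of the first page.
 */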
static void scrub_recheck_block_checksum(struct scrub_block *sblock)
{
	sblock->header_error = 0;
	sblock->checksum_error = 0;
	sblock->generation_error = 0;

	if (sblock->pagev[0]->flags & BTRFS_EXTENT_FLAG_DATA)
		scrub_checksum_data(sblock);
	else
		scrub_checksum_tree_block(sblock);
}

static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
					     struct scrub_block *sblock_good)
{
	int page_num;
	int ret = 0;

	for (page_num = 0; page_num < sblock_bad->page_count; page_num++) {
		int ret_sub;

		ret_sub = scrub_repair_page_from_good_copy(sblock_bad,
							   sblock_good,
							   page_num, 1);
		if (ret_sub)
			ret = ret_sub;
	}

	return ret;
}

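/*
 * Overwrite one page of the bad mirror with the same page of the good
 * mirror.  With @force_write the page is rewritten unconditionally;
 * otherwise only when the bad block saw a header, checksum or I/O
 * error.
 */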
static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
					    struct scrub_block *sblock_good,
					    int page_num, int force_write)
{
	struct scrub_page *page_bad = sblock_bad->pagev[page_num];
	struct scrub_page *page_good = sblock_good->pagev[page_num];
	struct btrfs_fs_info *fs_info = sblock_bad->sctx->fs_info;

	BUG_ON(page_bad->page == NULL);
	BUG_ON(page_good->page == NULL);
	if (force_write || sblock_bad->header_error ||
	    sblock_bad->checksum_error || page_bad->io_error) {
		struct bio *bio;
		int ret;

		if (!page_bad->dev->bdev) {
			btrfs_warn_rl(fs_info,
				"scrub_repair_page_from_good_copy(bdev == NULL) is unexpected");
			return -EIO;
		}

		bio = btrfs_io_bio_alloc(1);
		bio_set_dev(bio, page_bad->dev->bdev);
		bio->bi_iter.bi_sector = page_bad->physical >> 9;
		bio->bi_opf = REQ_OP_WRITE;

		ret = bio_add_page(bio, page_good->page, PAGE_SIZE, 0);
		if (ret != PAGE_SIZE) {
			bio_put(bio);
			return -EIO;
		}

		if (btrfsic_submit_bio_wait(bio)) {
			btrfs_dev_stat_inc_and_print(page_bad->dev,
				BTRFS_DEV_STAT_WRITE_ERRS);
			atomic64_inc(&fs_info->dev_replace.num_write_errors);
			bio_put(bio);
			return -EIO;
		}
		bio_put(bio);
	}

	return 0;
}

static void scrub_write_block_to_dev_replace(struct scrub_block *sblock)
{
	struct btrfs_fs_info *fs_info = sblock->sctx->fs_info;
	int page_num;

	/*
	 * This block is used for the check of the parity on the source device,
	 * so the data needn't be written into the destination device.
	 */
	if (sblock->sparity)
		return;

	for (page_num = 0; page_num < sblock->page_count; page_num++) {
		int ret;

		ret = scrub_write_page_to_dev_replace(sblock, page_num);
		if (ret)
			atomic64_inc(&fs_info->dev_replace.num_write_errors);
	}
}

static int scrub_write_page_to_dev_replace(struct scrub_block *sblock,
					   int page_num)
{
	struct scrub_page *spage = sblock->pagev[page_num];

	BUG_ON(spage->page == NULL);
	if (spage->io_error) {
		void *mapped_buffer = kmap_atomic(spage->page);

		clear_page(mapped_buffer);
		flush_dcache_page(spage->page);
		kunmap_atomic(mapped_buffer);
	}
	return scrub_add_page_to_wr_bio(sblock->sctx, spage);
}

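/*
 * Queue a page for writing to the dev-replace target.  Pages are
 * collected into the per-context write bio as long as they are
 * physically and logically contiguous; the bio is submitted once it is
 * full or a discontiguous page arrives.
 */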
static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
				    struct scrub_page *spage)
{
	struct scrub_bio *sbio;
	int ret;

	mutex_lock(&sctx->wr_lock);
again:
	if (!sctx->wr_curr_bio) {
		unsigned int nofs_flag;

		/*
		 * We must use GFP_NOFS because the scrub task might be waiting
		 * for a worker task executing this function and in turn a
		 * transaction commit might be waiting for the scrub task to
		 * pause (which needs to wait for all the worker tasks to
		 * complete before pausing).
		 */
		nofs_flag = memalloc_nofs_save();
		sctx->wr_curr_bio = kzalloc(sizeof(*sctx->wr_curr_bio),
					    GFP_KERNEL);
		memalloc_nofs_restore(nofs_flag);
		if (!sctx->wr_curr_bio) {
			mutex_unlock(&sctx->wr_lock);
			return -ENOMEM;
		}
		sctx->wr_curr_bio->sctx = sctx;
		sctx->wr_curr_bio->page_count = 0;
	}
	sbio = sctx->wr_curr_bio;
	if (sbio->page_count == 0) {
		struct bio *bio;

		sbio->physical = spage->physical_for_dev_replace;
		sbio->logical = spage->logical;
		sbio->dev = sctx->wr_tgtdev;
		bio = sbio->bio;
		if (!bio) {
			bio = btrfs_io_bio_alloc(sctx->pages_per_wr_bio);
			sbio->bio = bio;
		}

		bio->bi_private = sbio;
		bio->bi_end_io = scrub_wr_bio_end_io;
		bio_set_dev(bio, sbio->dev->bdev);
		bio->bi_iter.bi_sector = sbio->physical >> 9;
		bio->bi_opf = REQ_OP_WRITE;
		sbio->status = 0;
	} else if (sbio->physical + sbio->page_count * PAGE_SIZE !=
		   spage->physical_for_dev_replace ||
		   sbio->logical + sbio->page_count * PAGE_SIZE !=
		   spage->logical) {
		scrub_wr_submit(sctx);
		goto again;
	}

	ret = bio_add_page(sbio->bio, spage->page, PAGE_SIZE, 0);
	if (ret != PAGE_SIZE) {
		if (sbio->page_count < 1) {
			bio_put(sbio->bio);
			sbio->bio = NULL;
			mutex_unlock(&sctx->wr_lock);
			return -EIO;
		}
		scrub_wr_submit(sctx);
		goto again;
	}

	sbio->pagev[sbio->page_count] = spage;
	scrub_page_get(spage);
	sbio->page_count++;
	if (sbio->page_count == sctx->pages_per_wr_bio)
		scrub_wr_submit(sctx);
	mutex_unlock(&sctx->wr_lock);

	return 0;
}

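/*
 * Submit the write bio that is currently being filled for the
 * dev-replace target, if any.  Called with wr_lock held.
 */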
static void scrub_wr_submit(struct scrub_ctx *sctx)
{
	struct scrub_bio *sbio;

	if (!sctx->wr_curr_bio)
		return;

	sbio = sctx->wr_curr_bio;
	sctx->wr_curr_bio = NULL;
	WARN_ON(!sbio->bio->bi_disk);
	scrub_pending_bio_inc(sctx);
	/*
	 * process all writes in a single worker thread. Then the block layer
	 * orders the requests before sending them to the driver which
	 * doubled the write performance on spinning disks when measured
	 * with Linux 3.5
	 */
	btrfsic_submit_bio(sbio->bio);
}

static void scrub_wr_bio_end_io(struct bio *bio)
{
	struct scrub_bio *sbio = bio->bi_private;
	struct btrfs_fs_info *fs_info = sbio->dev->fs_info;

	sbio->status = bio->bi_status;
	sbio->bio = bio;

	btrfs_init_work(&sbio->work, btrfs_scrubwrc_helper,
			scrub_wr_bio_end_io_worker, NULL, NULL);
	btrfs_queue_work(fs_info->scrub_wr_completion_workers, &sbio->work);
}

static void scrub_wr_bio_end_io_worker(struct btrfs_work *work)
{
	struct scrub_bio *sbio = container_of(work, struct scrub_bio, work);
	struct scrub_ctx *sctx = sbio->sctx;
	int i;

	WARN_ON(sbio->page_count > SCRUB_PAGES_PER_WR_BIO);
	if (sbio->status) {
		struct btrfs_dev_replace *dev_replace =
			&sbio->sctx->fs_info->dev_replace;

		for (i = 0; i < sbio->page_count; i++) {
			struct scrub_page *spage = sbio->pagev[i];

			spage->io_error = 1;
			atomic64_inc(&dev_replace->num_write_errors);
		}
	}

	for (i = 0; i < sbio->page_count; i++)
		scrub_page_put(sbio->pagev[i]);

	bio_put(sbio->bio);
	kfree(sbio);
	scrub_pending_bio_dec(sctx);
}

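/*
 * Verify a block against its expected checksum and kick off the repair
 * machinery for bad blocks.  Super block errors are only counted and
 * reported; the return value reflects data/tree block verification.
 */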
static int scrub_checksum(struct scrub_block *sblock)
{
	u64 flags;
	int ret;

	/*
	 * No need to initialize these stats currently,
	 * because this function only uses the return value
	 * instead of these stats values.
	 *
	 * Todo:
	 * always use stats
	 */
	sblock->header_error = 0;
	sblock->generation_error = 0;
	sblock->checksum_error = 0;

	WARN_ON(sblock->page_count < 1);
	flags = sblock->pagev[0]->flags;
	ret = 0;
	if (flags & BTRFS_EXTENT_FLAG_DATA)
		ret = scrub_checksum_data(sblock);
	else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
		ret = scrub_checksum_tree_block(sblock);
	else if (flags & BTRFS_EXTENT_FLAG_SUPER)
		(void)scrub_checksum_super(sblock);
	else
		WARN_ON(1);
	if (ret)
		scrub_handle_errored_block(sblock);

	return ret;
}

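/*
 * Compute the data checksum page by page and compare it with the
 * on-disk checksum stored for the first page.  Returns non-zero on
 * mismatch and sets sblock->checksum_error.
 */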
static int scrub_checksum_data(struct scrub_block *sblock)
{
	struct scrub_ctx *sctx = sblock->sctx;
	u8 csum[BTRFS_CSUM_SIZE];
	u8 *on_disk_csum;
	struct page *page;
	void *buffer;
	u32 crc = ~(u32)0;
	u64 len;
	int index;

	BUG_ON(sblock->page_count < 1);
	if (!sblock->pagev[0]->have_csum)
		return 0;

	on_disk_csum = sblock->pagev[0]->csum;
	page = sblock->pagev[0]->page;
	buffer = kmap_atomic(page);

	len = sctx->fs_info->sectorsize;
	index = 0;
	for (;;) {
		u64 l = min_t(u64, len, PAGE_SIZE);

		crc = btrfs_csum_data(buffer, crc, l);
		kunmap_atomic(buffer);
		len -= l;
		if (len == 0)
			break;
		index++;
		BUG_ON(index >= sblock->page_count);
		BUG_ON(!sblock->pagev[index]->page);
		page = sblock->pagev[index]->page;
		buffer = kmap_atomic(page);
	}

	btrfs_csum_final(crc, csum);
	if (memcmp(csum, on_disk_csum, sctx->csum_size))
		sblock->checksum_error = 1;

	return sblock->checksum_error;
}

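/*
 * Verify a tree block: bytenr, generation, fsid and chunk tree uuid
 * from the header are checked against the expected values, then the
 * checksum over the node body is compared with the stored one.
 */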
static int scrub_checksum_tree_block(struct scrub_block *sblock)
{
	struct scrub_ctx *sctx = sblock->sctx;
	struct btrfs_header *h;
	struct btrfs_fs_info *fs_info = sctx->fs_info;
	u8 calculated_csum[BTRFS_CSUM_SIZE];
	u8 on_disk_csum[BTRFS_CSUM_SIZE];
	struct page *page;
	void *mapped_buffer;
	u64 mapped_size;
	void *p;
	u32 crc = ~(u32)0;
	u64 len;
	int index;

	BUG_ON(sblock->page_count < 1);
	page = sblock->pagev[0]->page;
	mapped_buffer = kmap_atomic(page);
	h = (struct btrfs_header *)mapped_buffer;
	memcpy(on_disk_csum, h->csum, sctx->csum_size);

	/*
	 * we don't use the getter functions here, as we
	 * a) don't have an extent buffer and
	 * b) the page is already kmapped
	 */
	if (sblock->pagev[0]->logical != btrfs_stack_header_bytenr(h))
		sblock->header_error = 1;

	if (sblock->pagev[0]->generation != btrfs_stack_header_generation(h)) {
		sblock->header_error = 1;
		sblock->generation_error = 1;
	}

	if (!scrub_check_fsid(h->fsid, sblock->pagev[0]))
		sblock->header_error = 1;

	if (memcmp(h->chunk_tree_uuid, fs_info->chunk_tree_uuid,
		   BTRFS_UUID_SIZE))
		sblock->header_error = 1;

	len = sctx->fs_info->nodesize - BTRFS_CSUM_SIZE;
	mapped_size = PAGE_SIZE - BTRFS_CSUM_SIZE;
	p = ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE;
	index = 0;
	for (;;) {
		u64 l = min_t(u64, len, mapped_size);

		crc = btrfs_csum_data(p, crc, l);
		kunmap_atomic(mapped_buffer);
		len -= l;
		if (len == 0)
			break;
		index++;
		BUG_ON(index >= sblock->page_count);
		BUG_ON(!sblock->pagev[index]->page);
		page = sblock->pagev[index]->page;
		mapped_buffer = kmap_atomic(page);
		mapped_size = PAGE_SIZE;
		p = mapped_buffer;
	}

	btrfs_csum_final(crc, calculated_csum);
	if (memcmp(calculated_csum, on_disk_csum, sctx->csum_size))
		sblock->checksum_error = 1;

	return sblock->header_error || sblock->checksum_error;
}

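/*
 * Verify a super block copy.  Corruption and generation mismatches are
 * only counted and reported, since a stale or damaged copy is rewritten
 * by the next transaction commit anyway.
 */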
static int scrub_checksum_super(struct scrub_block *sblock)
{
	struct btrfs_super_block *s;
	struct scrub_ctx *sctx = sblock->sctx;
	u8 calculated_csum[BTRFS_CSUM_SIZE];
	u8 on_disk_csum[BTRFS_CSUM_SIZE];
	struct page *page;
	void *mapped_buffer;
	u64 mapped_size;
	void *p;
	u32 crc = ~(u32)0;
	int fail_gen = 0;
	int fail_cor = 0;
	u64 len;
	int index;

	BUG_ON(sblock->page_count < 1);
	page = sblock->pagev[0]->page;
	mapped_buffer = kmap_atomic(page);
	s = (struct btrfs_super_block *)mapped_buffer;
	memcpy(on_disk_csum, s->csum, sctx->csum_size);

	if (sblock->pagev[0]->logical != btrfs_super_bytenr(s))
		++fail_cor;

	if (sblock->pagev[0]->generation != btrfs_super_generation(s))
		++fail_gen;

	if (!scrub_check_fsid(s->fsid, sblock->pagev[0]))
		++fail_cor;

	len = BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE;
	mapped_size = PAGE_SIZE - BTRFS_CSUM_SIZE;
	p = ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE;
	index = 0;
	for (;;) {
		u64 l = min_t(u64, len, mapped_size);

		crc = btrfs_csum_data(p, crc, l);
		kunmap_atomic(mapped_buffer);
		len -= l;
		if (len == 0)
			break;
		index++;
		BUG_ON(index >= sblock->page_count);
		BUG_ON(!sblock->pagev[index]->page);
		page = sblock->pagev[index]->page;
		mapped_buffer = kmap_atomic(page);
		mapped_size = PAGE_SIZE;
		p = mapped_buffer;
	}

	btrfs_csum_final(crc, calculated_csum);
	if (memcmp(calculated_csum, on_disk_csum, sctx->csum_size))
		++fail_cor;

	if (fail_cor + fail_gen) {
		/*
		 * if we find an error in a super block, we just report it.
		 * They will get written with the next transaction commit
		 * anyway
		 */
		spin_lock(&sctx->stat_lock);
		++sctx->stat.super_errors;
		spin_unlock(&sctx->stat_lock);
		if (fail_cor)
			btrfs_dev_stat_inc_and_print(sblock->pagev[0]->dev,
				BTRFS_DEV_STAT_CORRUPTION_ERRS);
		else
			btrfs_dev_stat_inc_and_print(sblock->pagev[0]->dev,
				BTRFS_DEV_STAT_GENERATION_ERRS);
	}

	return fail_cor + fail_gen;
}

static void scrub_block_get(struct scrub_block *sblock)
{
	refcount_inc(&sblock->refs);
}

static void scrub_block_put(struct scrub_block *sblock)
{
	if (refcount_dec_and_test(&sblock->refs)) {
		int i;

		if (sblock->sparity)
			scrub_parity_put(sblock->sparity);

		for (i = 0; i < sblock->page_count; i++)
			scrub_page_put(sblock->pagev[i]);
		kfree(sblock);
	}
}

static void scrub_page_get(struct scrub_page *spage)
{
	atomic_inc(&spage->refs);
}

static void scrub_page_put(struct scrub_page *spage)
{
	if (atomic_dec_and_test(&spage->refs)) {
		if (spage->page)
			__free_page(spage->page);
		kfree(spage);
	}
}

static void scrub_submit(struct scrub_ctx *sctx)
{
	struct scrub_bio *sbio;

	if (sctx->curr == -1)
		return;

	sbio = sctx->bios[sctx->curr];
	sctx->curr = -1;
	scrub_pending_bio_inc(sctx);
	btrfsic_submit_bio(sbio->bio);
}

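/*
 * Queue a page for reading.  Contiguous pages of the same device are
 * packed into the current read bio; the bio is submitted when it is
 * full or when the next page does not continue it.  May block until a
 * free scrub_bio becomes available.
 */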
static int scrub_add_page_to_rd_bio(struct scrub_ctx *sctx,
				    struct scrub_page *spage)
{
	struct scrub_block *sblock = spage->sblock;
	struct scrub_bio *sbio;
	int ret;

again:
	/*
	 * grab a fresh bio or wait for one to become available
	 */
	while (sctx->curr == -1) {
		spin_lock(&sctx->list_lock);
		sctx->curr = sctx->first_free;
		if (sctx->curr != -1) {
			sctx->first_free = sctx->bios[sctx->curr]->next_free;
			sctx->bios[sctx->curr]->next_free = -1;
			sctx->bios[sctx->curr]->page_count = 0;
			spin_unlock(&sctx->list_lock);
		} else {
			spin_unlock(&sctx->list_lock);
			wait_event(sctx->list_wait, sctx->first_free != -1);
		}
	}
	sbio = sctx->bios[sctx->curr];
	if (sbio->page_count == 0) {
		struct bio *bio;

		sbio->physical = spage->physical;
		sbio->logical = spage->logical;
		sbio->dev = spage->dev;
		bio = sbio->bio;
		if (!bio) {
			bio = btrfs_io_bio_alloc(sctx->pages_per_rd_bio);
			sbio->bio = bio;
		}

		bio->bi_private = sbio;
		bio->bi_end_io = scrub_bio_end_io;
		bio_set_dev(bio, sbio->dev->bdev);
		bio->bi_iter.bi_sector = sbio->physical >> 9;
		bio->bi_opf = REQ_OP_READ;
		sbio->status = 0;
	} else if (sbio->physical + sbio->page_count * PAGE_SIZE !=
		   spage->physical ||
		   sbio->logical + sbio->page_count * PAGE_SIZE !=
		   spage->logical ||
		   sbio->dev != spage->dev) {
		scrub_submit(sctx);
		goto again;
	}

	sbio->pagev[sbio->page_count] = spage;
	ret = bio_add_page(sbio->bio, spage->page, PAGE_SIZE, 0);
	if (ret != PAGE_SIZE) {
		if (sbio->page_count < 1) {
			bio_put(sbio->bio);
			sbio->bio = NULL;
			return -EIO;
		}
		scrub_submit(sctx);
		goto again;
	}

	scrub_block_get(sblock); /* one for the page added to the bio */
	atomic_inc(&sblock->outstanding_pages);
	sbio->page_count++;
	if (sbio->page_count == sctx->pages_per_rd_bio)
		scrub_submit(sctx);

	return 0;
}

static void scrub_missing_raid56_end_io(struct bio *bio)
{
	struct scrub_block *sblock = bio->bi_private;
	struct btrfs_fs_info *fs_info = sblock->sctx->fs_info;

	if (bio->bi_status)
		sblock->no_io_error_seen = 0;

	bio_put(bio);

	btrfs_queue_work(fs_info->scrub_workers, &sblock->work);
}

static void scrub_missing_raid56_worker(struct btrfs_work *work)
{
	struct scrub_block *sblock = container_of(work, struct scrub_block, work);
	struct scrub_ctx *sctx = sblock->sctx;
	struct btrfs_fs_info *fs_info = sctx->fs_info;
	u64 logical;
	struct btrfs_device *dev;

	logical = sblock->pagev[0]->logical;
	dev = sblock->pagev[0]->dev;

	if (sblock->no_io_error_seen)
		scrub_recheck_block_checksum(sblock);

	if (!sblock->no_io_error_seen) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.read_errors++;
		spin_unlock(&sctx->stat_lock);
		btrfs_err_rl_in_rcu(fs_info,
			"IO error rebuilding logical %llu for dev %s",
			logical, rcu_str_deref(dev->name));
	} else if (sblock->header_error || sblock->checksum_error) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.uncorrectable_errors++;
		spin_unlock(&sctx->stat_lock);
		btrfs_err_rl_in_rcu(fs_info,
			"failed to rebuild valid logical %llu for dev %s",
			logical, rcu_str_deref(dev->name));
	} else {
		scrub_write_block_to_dev_replace(sblock);
	}

	scrub_block_put(sblock);

	if (sctx->is_dev_replace && sctx->flush_all_writes) {
		mutex_lock(&sctx->wr_lock);
		scrub_wr_submit(sctx);
		mutex_unlock(&sctx->wr_lock);
	}

	scrub_pending_bio_dec(sctx);
}

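/*
 * Rebuild a block that lives on a missing device from the remaining
 * stripes via the RAID5/6 recovery code.  Verification and the write to
 * the replace target happen later in scrub_missing_raid56_worker().
 */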
static void scrub_missing_raid56_pages(struct scrub_block *sblock)
{
	struct scrub_ctx *sctx = sblock->sctx;
	struct btrfs_fs_info *fs_info = sctx->fs_info;
	u64 length = sblock->page_count * PAGE_SIZE;
	u64 logical = sblock->pagev[0]->logical;
	struct btrfs_bio *bbio = NULL;
	struct bio *bio;
	struct btrfs_raid_bio *rbio;
	int ret;
	int i;

	btrfs_bio_counter_inc_blocked(fs_info);
	ret = btrfs_map_sblock(fs_info, BTRFS_MAP_GET_READ_MIRRORS, logical,
			&length, &bbio);
	if (ret || !bbio || !bbio->raid_map)
		goto bbio_out;

	if (WARN_ON(!sctx->is_dev_replace ||
		    !(bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK))) {
		/*
		 * We shouldn't be scrubbing a missing device. Even for dev
		 * replace, we should only get here for RAID 5/6. We either
		 * managed to mount something with no mirrors remaining or
		 * there's a bug in scrub_remap_extent()/btrfs_map_block().
		 */
		goto bbio_out;
	}

	bio = btrfs_io_bio_alloc(0);
	bio->bi_iter.bi_sector = logical >> 9;
	bio->bi_private = sblock;
	bio->bi_end_io = scrub_missing_raid56_end_io;

	rbio = raid56_alloc_missing_rbio(fs_info, bio, bbio, length);
	if (!rbio)
		goto rbio_out;

	for (i = 0; i < sblock->page_count; i++) {
		struct scrub_page *spage = sblock->pagev[i];

		raid56_add_scrub_pages(rbio, spage->page, spage->logical);
	}

	btrfs_init_work(&sblock->work, btrfs_scrub_helper,
			scrub_missing_raid56_worker, NULL, NULL);
	scrub_block_get(sblock);
	scrub_pending_bio_inc(sctx);
	raid56_submit_missing_rbio(rbio);
	return;

rbio_out:
	bio_put(bio);
bbio_out:
	btrfs_bio_counter_dec(fs_info);
	btrfs_put_bbio(bbio);
	spin_lock(&sctx->stat_lock);
	sctx->stat.malloc_errors++;
	spin_unlock(&sctx->stat_lock);
}

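/*
 * Create a scrub_block for [logical, logical + len), split into
 * PAGE_SIZE pages, and queue all its pages for reading.  Blocks on a
 * missing device take the RAID5/6 rebuild path instead of a plain read.
 */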
static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
		       u64 physical, struct btrfs_device *dev, u64 flags,
		       u64 gen, int mirror_num, u8 *csum, int force,
		       u64 physical_for_dev_replace)
{
	struct scrub_block *sblock;
	int index;

	sblock = kzalloc(sizeof(*sblock), GFP_KERNEL);
	if (!sblock) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.malloc_errors++;
		spin_unlock(&sctx->stat_lock);
		return -ENOMEM;
	}

	/*
	 * one ref inside this function, plus one for each page added to
	 * a bio later on
	 */
	refcount_set(&sblock->refs, 1);
	sblock->sctx = sctx;
	sblock->no_io_error_seen = 1;

	for (index = 0; len > 0; index++) {
		struct scrub_page *spage;
		u64 l = min_t(u64, len, PAGE_SIZE);

		spage = kzalloc(sizeof(*spage), GFP_KERNEL);
		if (!spage) {
leave_nomem:
			spin_lock(&sctx->stat_lock);
			sctx->stat.malloc_errors++;
			spin_unlock(&sctx->stat_lock);
			scrub_block_put(sblock);
			return -ENOMEM;
		}
		BUG_ON(index >= SCRUB_MAX_PAGES_PER_BLOCK);
		scrub_page_get(spage);
		sblock->pagev[index] = spage;
		spage->sblock = sblock;
		spage->dev = dev;
		spage->flags = flags;
		spage->generation = gen;
		spage->logical = logical;
		spage->physical = physical;
		spage->physical_for_dev_replace = physical_for_dev_replace;
		spage->mirror_num = mirror_num;
		if (csum) {
			spage->have_csum = 1;
			memcpy(spage->csum, csum, sctx->csum_size);
		} else {
			spage->have_csum = 0;
		}
		sblock->page_count++;
		spage->page = alloc_page(GFP_KERNEL);
		if (!spage->page)
			goto leave_nomem;
		len -= l;
		logical += l;
		physical += l;
		physical_for_dev_replace += l;
	}

	WARN_ON(sblock->page_count == 0);
	if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state)) {
		/*
		 * This case should only be hit for RAID 5/6 device replace. See
		 * the comment in scrub_missing_raid56_pages() for details.
		 */
		scrub_missing_raid56_pages(sblock);
	} else {
		for (index = 0; index < sblock->page_count; index++) {
			struct scrub_page *spage = sblock->pagev[index];
			int ret;

			ret = scrub_add_page_to_rd_bio(sctx, spage);
			if (ret) {
				scrub_block_put(sblock);
				return ret;
			}
		}

		if (force)
			scrub_submit(sctx);
	}

	/* last one frees, either here or in bio completion for last page */
	scrub_block_put(sblock);
	return 0;
}

static void scrub_bio_end_io(struct bio *bio)
{
	struct scrub_bio *sbio = bio->bi_private;
	struct btrfs_fs_info *fs_info = sbio->dev->fs_info;

	sbio->status = bio->bi_status;
	sbio->bio = bio;

	btrfs_queue_work(fs_info->scrub_workers, &sbio->work);
}

static void scrub_bio_end_io_worker(struct btrfs_work *work)
{
	struct scrub_bio *sbio = container_of(work, struct scrub_bio, work);
	struct scrub_ctx *sctx = sbio->sctx;
	int i;

	BUG_ON(sbio->page_count > SCRUB_PAGES_PER_RD_BIO);
	if (sbio->status) {
		for (i = 0; i < sbio->page_count; i++) {
			struct scrub_page *spage = sbio->pagev[i];

			spage->io_error = 1;
			spage->sblock->no_io_error_seen = 0;
		}
	}

	/* now complete the scrub_block items that have all pages completed */
	for (i = 0; i < sbio->page_count; i++) {
		struct scrub_page *spage = sbio->pagev[i];
		struct scrub_block *sblock = spage->sblock;

		if (atomic_dec_and_test(&sblock->outstanding_pages))
			scrub_block_complete(sblock);
		scrub_block_put(sblock);
	}

	bio_put(sbio->bio);
	sbio->bio = NULL;
	spin_lock(&sctx->list_lock);
	sbio->next_free = sctx->first_free;
	sctx->first_free = sbio->index;
	spin_unlock(&sctx->list_lock);

	if (sctx->is_dev_replace && sctx->flush_all_writes) {
		mutex_lock(&sctx->wr_lock);
		scrub_wr_submit(sctx);
		mutex_unlock(&sctx->wr_lock);
	}

	scrub_pending_bio_dec(sctx);
}

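/*
 * Mark the sectors covered by [start, start + len) in the given parity
 * bitmap.  The range is folded into a single stripe, so a region that
 * runs past the end of the stripe continues at bit 0.
 */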
static inline void __scrub_mark_bitmap(struct scrub_parity *sparity,
				       unsigned long *bitmap,
				       u64 start, u64 len)
{
	u64 offset;
	u64 nsectors64;
	u32 nsectors;
	int sectorsize = sparity->sctx->fs_info->sectorsize;

	if (len >= sparity->stripe_len) {
		bitmap_set(bitmap, 0, sparity->nsectors);
		return;
	}

	start -= sparity->logic_start;
	start = div64_u64_rem(start, sparity->stripe_len, &offset);
	offset = div_u64(offset, sectorsize);
	nsectors64 = div_u64(len, sectorsize);

	ASSERT(nsectors64 < UINT_MAX);
	nsectors = (u32)nsectors64;

	if (offset + nsectors <= sparity->nsectors) {
		bitmap_set(bitmap, offset, nsectors);
		return;
	}

	bitmap_set(bitmap, offset, sparity->nsectors - offset);
	bitmap_set(bitmap, 0, nsectors - (sparity->nsectors - offset));
}

static inline void scrub_parity_mark_sectors_error(struct scrub_parity *sparity,
						   u64 start, u64 len)
{
	__scrub_mark_bitmap(sparity, sparity->ebitmap, start, len);
}

static inline void scrub_parity_mark_sectors_data(struct scrub_parity *sparity,
						  u64 start, u64 len)
{
	__scrub_mark_bitmap(sparity, sparity->dbitmap, start, len);
}

static void scrub_block_complete(struct scrub_block *sblock)
{
	int corrupted = 0;

	if (!sblock->no_io_error_seen) {
		corrupted = 1;
		scrub_handle_errored_block(sblock);
	} else {
		/*
		 * In the dev-replace case: if the block has a checksum
		 * error it is written out via the repair mechanism,
		 * otherwise it is written out here.
		 */
		corrupted = scrub_checksum(sblock);
		if (!corrupted && sblock->sctx->is_dev_replace)
			scrub_write_block_to_dev_replace(sblock);
	}

	if (sblock->sparity && corrupted && !sblock->data_corrected) {
		u64 start = sblock->pagev[0]->logical;
		u64 end = sblock->pagev[sblock->page_count - 1]->logical +
			  PAGE_SIZE;

		scrub_parity_mark_sectors_error(sblock->sparity,
						start, end - start);
	}
}

Zhao Lei3b5753e2015-08-24 22:03:02 +08002432static int scrub_find_csum(struct scrub_ctx *sctx, u64 logical, u8 *csum)
Arne Jansena2de7332011-03-08 14:14:00 +01002433{
2434 struct btrfs_ordered_sum *sum = NULL;
Miao Xief51a4a12013-06-19 10:36:09 +08002435 unsigned long index;
Arne Jansena2de7332011-03-08 14:14:00 +01002436 unsigned long num_sectors;
Arne Jansena2de7332011-03-08 14:14:00 +01002437
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002438 while (!list_empty(&sctx->csum_list)) {
2439 sum = list_first_entry(&sctx->csum_list,
Arne Jansena2de7332011-03-08 14:14:00 +01002440 struct btrfs_ordered_sum, list);
2441 if (sum->bytenr > logical)
2442 return 0;
2443 if (sum->bytenr + sum->len > logical)
2444 break;
2445
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002446 ++sctx->stat.csum_discards;
Arne Jansena2de7332011-03-08 14:14:00 +01002447 list_del(&sum->list);
2448 kfree(sum);
2449 sum = NULL;
2450 }
2451 if (!sum)
2452 return 0;
2453
David Sterba1d1bf922017-03-31 18:02:48 +02002454 index = div_u64(logical - sum->bytenr, sctx->fs_info->sectorsize);
2455 ASSERT(index < UINT_MAX);
2456
David Sterba25cc1222017-05-16 19:10:41 +02002457 num_sectors = sum->len / sctx->fs_info->sectorsize;
Miao Xief51a4a12013-06-19 10:36:09 +08002458 memcpy(csum, sum->sums + index, sctx->csum_size);
2459 if (index == num_sectors - 1) {
Arne Jansena2de7332011-03-08 14:14:00 +01002460 list_del(&sum->list);
2461 kfree(sum);
2462 }
Miao Xief51a4a12013-06-19 10:36:09 +08002463 return 1;
Arne Jansena2de7332011-03-08 14:14:00 +01002464}
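
/*
 * Index math above, with assumed values: a btrfs_ordered_sum covering
 * bytenr = 1M, len = 32K on a 4K-sector filesystem serves a lookup at
 * logical = 1M + 20K with index = 5 out of num_sectors = 8; only the
 * lookup of the final sector (index 7) frees the ordered sum, since
 * earlier sectors of the same extent may still need it.
 */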
2465
2466/* scrub extent tries to collect up to 64 kB for each bio */
Liu Bo6ca17652018-03-07 12:08:09 -07002467static int scrub_extent(struct scrub_ctx *sctx, struct map_lookup *map,
2468 u64 logical, u64 len,
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01002469 u64 physical, struct btrfs_device *dev, u64 flags,
Stefan Behrensff023aa2012-11-06 11:43:11 +01002470 u64 gen, int mirror_num, u64 physical_for_dev_replace)
Arne Jansena2de7332011-03-08 14:14:00 +01002471{
2472 int ret;
2473 u8 csum[BTRFS_CSUM_SIZE];
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002474 u32 blocksize;
2475
2476 if (flags & BTRFS_EXTENT_FLAG_DATA) {
Liu Bo6ca17652018-03-07 12:08:09 -07002477 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
2478 blocksize = map->stripe_len;
2479 else
2480 blocksize = sctx->fs_info->sectorsize;
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002481 spin_lock(&sctx->stat_lock);
2482 sctx->stat.data_extents_scrubbed++;
2483 sctx->stat.data_bytes_scrubbed += len;
2484 spin_unlock(&sctx->stat_lock);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002485 } else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
Liu Bo6ca17652018-03-07 12:08:09 -07002486 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
2487 blocksize = map->stripe_len;
2488 else
2489 blocksize = sctx->fs_info->nodesize;
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002490 spin_lock(&sctx->stat_lock);
2491 sctx->stat.tree_extents_scrubbed++;
2492 sctx->stat.tree_bytes_scrubbed += len;
2493 spin_unlock(&sctx->stat_lock);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002494 } else {
David Sterba25cc1222017-05-16 19:10:41 +02002495 blocksize = sctx->fs_info->sectorsize;
Stefan Behrensff023aa2012-11-06 11:43:11 +01002496 WARN_ON(1);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002497 }
Arne Jansena2de7332011-03-08 14:14:00 +01002498
2499 while (len) {
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002500 u64 l = min_t(u64, len, blocksize);
Arne Jansena2de7332011-03-08 14:14:00 +01002501 int have_csum = 0;
2502
2503 if (flags & BTRFS_EXTENT_FLAG_DATA) {
2504 /* push csums to sbio */
Zhao Lei3b5753e2015-08-24 22:03:02 +08002505 have_csum = scrub_find_csum(sctx, logical, csum);
Arne Jansena2de7332011-03-08 14:14:00 +01002506 if (have_csum == 0)
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002507 ++sctx->stat.no_csum;
Arne Jansena2de7332011-03-08 14:14:00 +01002508 }
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01002509 ret = scrub_pages(sctx, logical, l, physical, dev, flags, gen,
Stefan Behrensff023aa2012-11-06 11:43:11 +01002510 mirror_num, have_csum ? csum : NULL, 0,
2511 physical_for_dev_replace);
Arne Jansena2de7332011-03-08 14:14:00 +01002512 if (ret)
2513 return ret;
2514 len -= l;
2515 logical += l;
2516 physical += l;
Stefan Behrensff023aa2012-11-06 11:43:11 +01002517 physical_for_dev_replace += l;
Arne Jansena2de7332011-03-08 14:14:00 +01002518 }
2519 return 0;
2520}
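
/*
 * Illustrative numbers for the chunking loop above: with a 4K
 * sectorsize and no RAID56, a 12K data extent is submitted as three
 * 4K blocks, each looking up its own checksum; a 16K tree block uses
 * blocksize = nodesize and therefore goes down as a single block.
 */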
2521
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002522static int scrub_pages_for_parity(struct scrub_parity *sparity,
2523 u64 logical, u64 len,
2524 u64 physical, struct btrfs_device *dev,
2525 u64 flags, u64 gen, int mirror_num, u8 *csum)
2526{
2527 struct scrub_ctx *sctx = sparity->sctx;
2528 struct scrub_block *sblock;
2529 int index;
2530
David Sterba58c4e172016-02-11 10:49:42 +01002531 sblock = kzalloc(sizeof(*sblock), GFP_KERNEL);
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002532 if (!sblock) {
2533 spin_lock(&sctx->stat_lock);
2534 sctx->stat.malloc_errors++;
2535 spin_unlock(&sctx->stat_lock);
2536 return -ENOMEM;
2537 }
2538
2539 /* one ref inside this function, plus one for each page added to
2540 * a bio later on */
Elena Reshetova186debd2017-03-03 10:55:23 +02002541 refcount_set(&sblock->refs, 1);
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002542 sblock->sctx = sctx;
2543 sblock->no_io_error_seen = 1;
2544 sblock->sparity = sparity;
2545 scrub_parity_get(sparity);
2546
2547 for (index = 0; len > 0; index++) {
2548 struct scrub_page *spage;
2549 u64 l = min_t(u64, len, PAGE_SIZE);
2550
David Sterba58c4e172016-02-11 10:49:42 +01002551 spage = kzalloc(sizeof(*spage), GFP_KERNEL);
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002552 if (!spage) {
2553leave_nomem:
2554 spin_lock(&sctx->stat_lock);
2555 sctx->stat.malloc_errors++;
2556 spin_unlock(&sctx->stat_lock);
2557 scrub_block_put(sblock);
2558 return -ENOMEM;
2559 }
2560 BUG_ON(index >= SCRUB_MAX_PAGES_PER_BLOCK);
2561 /* For scrub block */
2562 scrub_page_get(spage);
2563 sblock->pagev[index] = spage;
2564 /* For scrub parity */
2565 scrub_page_get(spage);
2566 list_add_tail(&spage->list, &sparity->spages);
2567 spage->sblock = sblock;
2568 spage->dev = dev;
2569 spage->flags = flags;
2570 spage->generation = gen;
2571 spage->logical = logical;
2572 spage->physical = physical;
2573 spage->mirror_num = mirror_num;
2574 if (csum) {
2575 spage->have_csum = 1;
2576 memcpy(spage->csum, csum, sctx->csum_size);
2577 } else {
2578 spage->have_csum = 0;
2579 }
2580 sblock->page_count++;
David Sterba58c4e172016-02-11 10:49:42 +01002581 spage->page = alloc_page(GFP_KERNEL);
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002582 if (!spage->page)
2583 goto leave_nomem;
2584 len -= l;
2585 logical += l;
2586 physical += l;
2587 }
2588
2589 WARN_ON(sblock->page_count == 0);
2590 for (index = 0; index < sblock->page_count; index++) {
2591 struct scrub_page *spage = sblock->pagev[index];
2592 int ret;
2593
2594 ret = scrub_add_page_to_rd_bio(sctx, spage);
2595 if (ret) {
2596 scrub_block_put(sblock);
2597 return ret;
2598 }
2599 }
2600
2601 /* last one frees, either here or in bio completion for last page */
2602 scrub_block_put(sblock);
2603 return 0;
2604}
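
/*
 * Note that each spage above is deliberately referenced twice: once
 * for sblock->pagev[] (the read path) and once for sparity->spages
 * (the later parity check); scrub_page_put() frees the page only
 * after both owners have dropped their reference.
 */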
2605
2606static int scrub_extent_for_parity(struct scrub_parity *sparity,
2607 u64 logical, u64 len,
2608 u64 physical, struct btrfs_device *dev,
2609 u64 flags, u64 gen, int mirror_num)
2610{
2611 struct scrub_ctx *sctx = sparity->sctx;
2612 int ret;
2613 u8 csum[BTRFS_CSUM_SIZE];
2614 u32 blocksize;
2615
Anand Jaine6e674b2017-12-04 12:54:54 +08002616 if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state)) {
Omar Sandoval4a770892015-06-19 11:52:52 -07002617 scrub_parity_mark_sectors_error(sparity, logical, len);
2618 return 0;
2619 }
2620
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002621 if (flags & BTRFS_EXTENT_FLAG_DATA) {
Liu Bo6ca17652018-03-07 12:08:09 -07002622 blocksize = sparity->stripe_len;
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002623 } else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
Liu Bo6ca17652018-03-07 12:08:09 -07002624 blocksize = sparity->stripe_len;
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002625 } else {
David Sterba25cc1222017-05-16 19:10:41 +02002626 blocksize = sctx->fs_info->sectorsize;
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002627 WARN_ON(1);
2628 }
2629
2630 while (len) {
2631 u64 l = min_t(u64, len, blocksize);
2632 int have_csum = 0;
2633
2634 if (flags & BTRFS_EXTENT_FLAG_DATA) {
2635 /* push csums to sbio */
Zhao Lei3b5753e2015-08-24 22:03:02 +08002636 have_csum = scrub_find_csum(sctx, logical, csum);
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002637 if (have_csum == 0)
2638 goto skip;
2639 }
2640 ret = scrub_pages_for_parity(sparity, logical, l, physical, dev,
2641 flags, gen, mirror_num,
2642 have_csum ? csum : NULL);
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002643 if (ret)
2644 return ret;
Dan Carpenter6b6d24b2014-12-12 22:30:00 +03002645skip:
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002646 len -= l;
2647 logical += l;
2648 physical += l;
2649 }
2650 return 0;
2651}
2652
Wang Shilong3b080b22014-04-01 18:01:43 +08002653/*
2654 * Given a physical address, this will calculate its
2655 * logical offset. If this is a parity stripe, it will return
2656 * the leftmost data stripe's logical offset.
2657 *
2658 * Return 0 if it is a data stripe, 1 if it is a parity stripe.
2659 */
2660static int get_raid56_logic_offset(u64 physical, int num,
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002661 struct map_lookup *map, u64 *offset,
2662 u64 *stripe_start)
Wang Shilong3b080b22014-04-01 18:01:43 +08002663{
2664 int i;
2665 int j = 0;
2666 u64 stripe_nr;
2667 u64 last_offset;
David Sterba9d644a62015-02-20 18:42:11 +01002668 u32 stripe_index;
2669 u32 rot;
Wang Shilong3b080b22014-04-01 18:01:43 +08002670
2671 last_offset = (physical - map->stripes[num].physical) *
2672 nr_data_stripes(map);
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002673 if (stripe_start)
2674 *stripe_start = last_offset;
2675
Wang Shilong3b080b22014-04-01 18:01:43 +08002676 *offset = last_offset;
2677 for (i = 0; i < nr_data_stripes(map); i++) {
2678 *offset = last_offset + i * map->stripe_len;
2679
Liu Bo42c61ab2017-04-03 13:45:24 -07002680 stripe_nr = div64_u64(*offset, map->stripe_len);
David Sterbab8b93ad2015-01-16 17:26:13 +01002681 stripe_nr = div_u64(stripe_nr, nr_data_stripes(map));
Wang Shilong3b080b22014-04-01 18:01:43 +08002682
2683 /* Work out the disk rotation on this stripe-set */
David Sterba47c57132015-02-20 18:43:47 +01002684 stripe_nr = div_u64_rem(stripe_nr, map->num_stripes, &rot);
Wang Shilong3b080b22014-04-01 18:01:43 +08002685 /* calculate which stripe this data locates */
2686 rot += i;
Wang Shilonge4fbaee2014-04-11 18:32:25 +08002687 stripe_index = rot % map->num_stripes;
Wang Shilong3b080b22014-04-01 18:01:43 +08002688 if (stripe_index == num)
2689 return 0;
2690 if (stripe_index < num)
2691 j++;
2692 }
2693 *offset = last_offset + j * map->stripe_len;
2694 return 1;
2695}
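
/*
 * Worked example for the rotation math above, with an assumed RAID5
 * layout: num_stripes = 3, nr_data_stripes() = 2, stripe_len = 64K,
 * row 0 laid out as dev0:D0 dev1:D1 dev2:P and rotated by one device
 * per row.  For physical offset 0 on device num = 1, the loop matches
 * at i = 1 and returns 0 with *offset = 64K (data stripe D1).  The
 * same offset on device num = 2 never matches, so the function
 * returns 1 with *stripe_start = 0, the logical start of the full
 * stripe whose parity lives there.
 */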
2696
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002697static void scrub_free_parity(struct scrub_parity *sparity)
2698{
2699 struct scrub_ctx *sctx = sparity->sctx;
2700 struct scrub_page *curr, *next;
2701 int nbits;
2702
2703 nbits = bitmap_weight(sparity->ebitmap, sparity->nsectors);
2704 if (nbits) {
2705 spin_lock(&sctx->stat_lock);
2706 sctx->stat.read_errors += nbits;
2707 sctx->stat.uncorrectable_errors += nbits;
2708 spin_unlock(&sctx->stat_lock);
2709 }
2710
2711 list_for_each_entry_safe(curr, next, &sparity->spages, list) {
2712 list_del_init(&curr->list);
2713 scrub_page_put(curr);
2714 }
2715
2716 kfree(sparity);
2717}
2718
Zhao Lei20b2e302015-06-04 20:09:15 +08002719static void scrub_parity_bio_endio_worker(struct btrfs_work *work)
2720{
2721 struct scrub_parity *sparity = container_of(work, struct scrub_parity,
2722 work);
2723 struct scrub_ctx *sctx = sparity->sctx;
2724
2725 scrub_free_parity(sparity);
2726 scrub_pending_bio_dec(sctx);
2727}
2728
Christoph Hellwig4246a0b2015-07-20 15:29:37 +02002729static void scrub_parity_bio_endio(struct bio *bio)
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002730{
2731 struct scrub_parity *sparity = (struct scrub_parity *)bio->bi_private;
Jeff Mahoney0b246af2016-06-22 18:54:23 -04002732 struct btrfs_fs_info *fs_info = sparity->sctx->fs_info;
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002733
Christoph Hellwig4e4cbee2017-06-03 09:38:06 +02002734 if (bio->bi_status)
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002735 bitmap_or(sparity->ebitmap, sparity->ebitmap, sparity->dbitmap,
2736 sparity->nsectors);
2737
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002738 bio_put(bio);
Zhao Lei20b2e302015-06-04 20:09:15 +08002739
2740 btrfs_init_work(&sparity->work, btrfs_scrubparity_helper,
2741 scrub_parity_bio_endio_worker, NULL, NULL);
Jeff Mahoney0b246af2016-06-22 18:54:23 -04002742 btrfs_queue_work(fs_info->scrub_parity_workers, &sparity->work);
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002743}
2744
2745static void scrub_parity_check_and_repair(struct scrub_parity *sparity)
2746{
2747 struct scrub_ctx *sctx = sparity->sctx;
Jeff Mahoney0b246af2016-06-22 18:54:23 -04002748 struct btrfs_fs_info *fs_info = sctx->fs_info;
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002749 struct bio *bio;
2750 struct btrfs_raid_bio *rbio;
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002751 struct btrfs_bio *bbio = NULL;
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002752 u64 length;
2753 int ret;
2754
2755 if (!bitmap_andnot(sparity->dbitmap, sparity->dbitmap, sparity->ebitmap,
2756 sparity->nsectors))
2757 goto out;
2758
Zhao Leia0dd59d2015-07-21 15:42:26 +08002759 length = sparity->logic_end - sparity->logic_start;
Qu Wenruoae6529c2017-03-29 09:33:21 +08002760
2761 btrfs_bio_counter_inc_blocked(fs_info);
Jeff Mahoney0b246af2016-06-22 18:54:23 -04002762 ret = btrfs_map_sblock(fs_info, BTRFS_MAP_WRITE, sparity->logic_start,
David Sterba825ad4c2017-03-28 14:45:22 +02002763 &length, &bbio);
Zhao Lei8e5cfb52015-01-20 15:11:33 +08002764 if (ret || !bbio || !bbio->raid_map)
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002765 goto bbio_out;
2766
David Sterbac5e4c3d2017-06-12 17:29:41 +02002767 bio = btrfs_io_bio_alloc(0);
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002768 bio->bi_iter.bi_sector = sparity->logic_start >> 9;
2769 bio->bi_private = sparity;
2770 bio->bi_end_io = scrub_parity_bio_endio;
2771
Jeff Mahoney2ff7e612016-06-22 18:54:24 -04002772 rbio = raid56_parity_alloc_scrub_rbio(fs_info, bio, bbio,
Zhao Lei8e5cfb52015-01-20 15:11:33 +08002773 length, sparity->scrub_dev,
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002774 sparity->dbitmap,
2775 sparity->nsectors);
2776 if (!rbio)
2777 goto rbio_out;
2778
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002779 scrub_pending_bio_inc(sctx);
2780 raid56_parity_submit_scrub_rbio(rbio);
2781 return;
2782
2783rbio_out:
2784 bio_put(bio);
2785bbio_out:
Qu Wenruoae6529c2017-03-29 09:33:21 +08002786 btrfs_bio_counter_dec(fs_info);
Zhao Lei6e9606d2015-01-20 15:11:34 +08002787 btrfs_put_bbio(bbio);
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002788 bitmap_or(sparity->ebitmap, sparity->ebitmap, sparity->dbitmap,
2789 sparity->nsectors);
2790 spin_lock(&sctx->stat_lock);
2791 sctx->stat.malloc_errors++;
2792 spin_unlock(&sctx->stat_lock);
2793out:
2794 scrub_free_parity(sparity);
2795}
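
/*
 * On the bitmap_andnot() above: dbitmap marks sectors that hold data,
 * ebitmap marks sectors that already failed scrub; only the sectors
 * left set in dbitmap still need their parity verified, and an empty
 * result skips building the rbio altogether.
 */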
2796
2797static inline int scrub_calc_parity_bitmap_len(int nsectors)
2798{
Zhao Leibfca9a62014-12-08 19:55:57 +08002799 return DIV_ROUND_UP(nsectors, BITS_PER_LONG) * sizeof(long);
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002800}
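
/*
 * Sketch of the allocation this sizes (assumed nsectors = 16 on a
 * 64-bit kernel): bitmap_len = DIV_ROUND_UP(16, 64) * 8 = 8 bytes,
 * and scrub_raid56_parity() below allocates 2 * bitmap_len after
 * struct scrub_parity so that dbitmap occupies the first half and
 * ebitmap the second.
 */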
2801
2802static void scrub_parity_get(struct scrub_parity *sparity)
2803{
Elena Reshetova78a76452017-03-03 10:55:24 +02002804 refcount_inc(&sparity->refs);
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002805}
2806
2807static void scrub_parity_put(struct scrub_parity *sparity)
2808{
Elena Reshetova78a76452017-03-03 10:55:24 +02002809 if (!refcount_dec_and_test(&sparity->refs))
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002810 return;
2811
2812 scrub_parity_check_and_repair(sparity);
2813}
2814
2815static noinline_for_stack int scrub_raid56_parity(struct scrub_ctx *sctx,
2816 struct map_lookup *map,
2817 struct btrfs_device *sdev,
2818 struct btrfs_path *path,
2819 u64 logic_start,
2820 u64 logic_end)
2821{
Jeff Mahoneyfb456252016-06-22 18:54:56 -04002822 struct btrfs_fs_info *fs_info = sctx->fs_info;
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002823 struct btrfs_root *root = fs_info->extent_root;
2824 struct btrfs_root *csum_root = fs_info->csum_root;
2825 struct btrfs_extent_item *extent;
Omar Sandoval4a770892015-06-19 11:52:52 -07002826 struct btrfs_bio *bbio = NULL;
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002827 u64 flags;
2828 int ret;
2829 int slot;
2830 struct extent_buffer *l;
2831 struct btrfs_key key;
2832 u64 generation;
2833 u64 extent_logical;
2834 u64 extent_physical;
2835 u64 extent_len;
Omar Sandoval4a770892015-06-19 11:52:52 -07002836 u64 mapped_length;
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002837 struct btrfs_device *extent_dev;
2838 struct scrub_parity *sparity;
2839 int nsectors;
2840 int bitmap_len;
2841 int extent_mirror_num;
2842 int stop_loop = 0;
2843
Jeff Mahoney0b246af2016-06-22 18:54:23 -04002844 nsectors = div_u64(map->stripe_len, fs_info->sectorsize);
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002845 bitmap_len = scrub_calc_parity_bitmap_len(nsectors);
2846 sparity = kzalloc(sizeof(struct scrub_parity) + 2 * bitmap_len,
2847 GFP_NOFS);
2848 if (!sparity) {
2849 spin_lock(&sctx->stat_lock);
2850 sctx->stat.malloc_errors++;
2851 spin_unlock(&sctx->stat_lock);
2852 return -ENOMEM;
2853 }
2854
2855 sparity->stripe_len = map->stripe_len;
2856 sparity->nsectors = nsectors;
2857 sparity->sctx = sctx;
2858 sparity->scrub_dev = sdev;
2859 sparity->logic_start = logic_start;
2860 sparity->logic_end = logic_end;
Elena Reshetova78a76452017-03-03 10:55:24 +02002861 refcount_set(&sparity->refs, 1);
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002862 INIT_LIST_HEAD(&sparity->spages);
2863 sparity->dbitmap = sparity->bitmap;
2864 sparity->ebitmap = (void *)sparity->bitmap + bitmap_len;
2865
2866 ret = 0;
2867 while (logic_start < logic_end) {
2868 if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
2869 key.type = BTRFS_METADATA_ITEM_KEY;
2870 else
2871 key.type = BTRFS_EXTENT_ITEM_KEY;
2872 key.objectid = logic_start;
2873 key.offset = (u64)-1;
2874
2875 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2876 if (ret < 0)
2877 goto out;
2878
2879 if (ret > 0) {
2880 ret = btrfs_previous_extent_item(root, path, 0);
2881 if (ret < 0)
2882 goto out;
2883 if (ret > 0) {
2884 btrfs_release_path(path);
2885 ret = btrfs_search_slot(NULL, root, &key,
2886 path, 0, 0);
2887 if (ret < 0)
2888 goto out;
2889 }
2890 }
2891
2892 stop_loop = 0;
2893 while (1) {
2894 u64 bytes;
2895
2896 l = path->nodes[0];
2897 slot = path->slots[0];
2898 if (slot >= btrfs_header_nritems(l)) {
2899 ret = btrfs_next_leaf(root, path);
2900 if (ret == 0)
2901 continue;
2902 if (ret < 0)
2903 goto out;
2904
2905 stop_loop = 1;
2906 break;
2907 }
2908 btrfs_item_key_to_cpu(l, &key, slot);
2909
Zhao Leid7cad232015-07-22 13:14:48 +08002910 if (key.type != BTRFS_EXTENT_ITEM_KEY &&
2911 key.type != BTRFS_METADATA_ITEM_KEY)
2912 goto next;
2913
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002914 if (key.type == BTRFS_METADATA_ITEM_KEY)
Jeff Mahoney0b246af2016-06-22 18:54:23 -04002915 bytes = fs_info->nodesize;
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002916 else
2917 bytes = key.offset;
2918
2919 if (key.objectid + bytes <= logic_start)
2920 goto next;
2921
Zhao Leia0dd59d2015-07-21 15:42:26 +08002922 if (key.objectid >= logic_end) {
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002923 stop_loop = 1;
2924 break;
2925 }
2926
2927 while (key.objectid >= logic_start + map->stripe_len)
2928 logic_start += map->stripe_len;
2929
2930 extent = btrfs_item_ptr(l, slot,
2931 struct btrfs_extent_item);
2932 flags = btrfs_extent_flags(l, extent);
2933 generation = btrfs_extent_generation(l, extent);
2934
Zhao Leia323e812015-07-23 12:29:49 +08002935 if ((flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) &&
2936 (key.objectid < logic_start ||
2937 key.objectid + bytes >
2938 logic_start + map->stripe_len)) {
Jeff Mahoney5d163e02016-09-20 10:05:00 -04002939 btrfs_err(fs_info,
2940 "scrub: tree block %llu spanning stripes, ignored. logical=%llu",
Zhao Leia323e812015-07-23 12:29:49 +08002941 key.objectid, logic_start);
Zhao Lei9799d2c32015-08-25 21:31:40 +08002942 spin_lock(&sctx->stat_lock);
2943 sctx->stat.uncorrectable_errors++;
2944 spin_unlock(&sctx->stat_lock);
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002945 goto next;
2946 }
2947again:
2948 extent_logical = key.objectid;
2949 extent_len = bytes;
2950
2951 if (extent_logical < logic_start) {
2952 extent_len -= logic_start - extent_logical;
2953 extent_logical = logic_start;
2954 }
2955
2956 if (extent_logical + extent_len >
2957 logic_start + map->stripe_len)
2958 extent_len = logic_start + map->stripe_len -
2959 extent_logical;
2960
2961 scrub_parity_mark_sectors_data(sparity, extent_logical,
2962 extent_len);
2963
Omar Sandoval4a770892015-06-19 11:52:52 -07002964 mapped_length = extent_len;
Zhao Leif1fee652016-05-17 17:37:38 +08002965 bbio = NULL;
Christoph Hellwigcf8cddd2016-10-27 09:27:36 +02002966 ret = btrfs_map_block(fs_info, BTRFS_MAP_READ,
2967 extent_logical, &mapped_length, &bbio,
2968 0);
Omar Sandoval4a770892015-06-19 11:52:52 -07002969 if (!ret) {
2970 if (!bbio || mapped_length < extent_len)
2971 ret = -EIO;
2972 }
2973 if (ret) {
2974 btrfs_put_bbio(bbio);
2975 goto out;
2976 }
2977 extent_physical = bbio->stripes[0].physical;
2978 extent_mirror_num = bbio->mirror_num;
2979 extent_dev = bbio->stripes[0].dev;
2980 btrfs_put_bbio(bbio);
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002981
2982 ret = btrfs_lookup_csums_range(csum_root,
2983 extent_logical,
2984 extent_logical + extent_len - 1,
2985 &sctx->csum_list, 1);
2986 if (ret)
2987 goto out;
2988
2989 ret = scrub_extent_for_parity(sparity, extent_logical,
2990 extent_len,
2991 extent_physical,
2992 extent_dev, flags,
2993 generation,
2994 extent_mirror_num);
Zhao Lei6fa96d72015-07-21 12:22:30 +08002995
2996 scrub_free_csums(sctx);
2997
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002998 if (ret)
2999 goto out;
3000
Miao Xie5a6ac9e2014-11-06 17:20:58 +08003001 if (extent_logical + extent_len <
3002 key.objectid + bytes) {
3003 logic_start += map->stripe_len;
3004
3005 if (logic_start >= logic_end) {
3006 stop_loop = 1;
3007 break;
3008 }
3009
3010 if (logic_start < key.objectid + bytes) {
3011 cond_resched();
3012 goto again;
3013 }
3014 }
3015next:
3016 path->slots[0]++;
3017 }
3018
3019 btrfs_release_path(path);
3020
3021 if (stop_loop)
3022 break;
3023
3024 logic_start += map->stripe_len;
3025 }
3026out:
3027 if (ret < 0)
3028 scrub_parity_mark_sectors_error(sparity, logic_start,
Zhao Leia0dd59d2015-07-21 15:42:26 +08003029 logic_end - logic_start);
Miao Xie5a6ac9e2014-11-06 17:20:58 +08003030 scrub_parity_put(sparity);
3031 scrub_submit(sctx);
David Sterba3fb99302017-05-16 19:10:32 +02003032 mutex_lock(&sctx->wr_lock);
Miao Xie5a6ac9e2014-11-06 17:20:58 +08003033 scrub_wr_submit(sctx);
David Sterba3fb99302017-05-16 19:10:32 +02003034 mutex_unlock(&sctx->wr_lock);
Miao Xie5a6ac9e2014-11-06 17:20:58 +08003035
3036 btrfs_release_path(path);
3037 return ret < 0 ? ret : 0;
3038}
3039
Stefan Behrensd9d181c2012-11-02 09:58:09 +01003040static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01003041 struct map_lookup *map,
3042 struct btrfs_device *scrub_dev,
Omar Sandoval32934282018-08-14 11:09:52 -07003043 int num, u64 base, u64 length)
Arne Jansena2de7332011-03-08 14:14:00 +01003044{
Miao Xie5a6ac9e2014-11-06 17:20:58 +08003045 struct btrfs_path *path, *ppath;
Jeff Mahoneyfb456252016-06-22 18:54:56 -04003046 struct btrfs_fs_info *fs_info = sctx->fs_info;
Arne Jansena2de7332011-03-08 14:14:00 +01003047 struct btrfs_root *root = fs_info->extent_root;
3048 struct btrfs_root *csum_root = fs_info->csum_root;
3049 struct btrfs_extent_item *extent;
Arne Jansene7786c32011-05-28 20:58:38 +00003050 struct blk_plug plug;
Arne Jansena2de7332011-03-08 14:14:00 +01003051 u64 flags;
3052 int ret;
3053 int slot;
Arne Jansena2de7332011-03-08 14:14:00 +01003054 u64 nstripes;
Arne Jansena2de7332011-03-08 14:14:00 +01003055 struct extent_buffer *l;
Arne Jansena2de7332011-03-08 14:14:00 +01003056 u64 physical;
3057 u64 logical;
Liu Bo625f1c8d2013-04-27 02:56:57 +00003058 u64 logic_end;
Wang Shilong3b080b22014-04-01 18:01:43 +08003059 u64 physical_end;
Arne Jansena2de7332011-03-08 14:14:00 +01003060 u64 generation;
Jan Schmidte12fa9c2011-06-17 15:55:21 +02003061 int mirror_num;
Arne Jansen7a262852011-06-10 12:39:23 +02003062 struct reada_control *reada1;
3063 struct reada_control *reada2;
David Sterbae6c11f92016-03-24 18:00:53 +01003064 struct btrfs_key key;
Arne Jansen7a262852011-06-10 12:39:23 +02003065 struct btrfs_key key_end;
Arne Jansena2de7332011-03-08 14:14:00 +01003066 u64 increment = map->stripe_len;
3067 u64 offset;
Stefan Behrensff023aa2012-11-06 11:43:11 +01003068 u64 extent_logical;
3069 u64 extent_physical;
3070 u64 extent_len;
Miao Xie5a6ac9e2014-11-06 17:20:58 +08003071 u64 stripe_logical;
3072 u64 stripe_end;
Stefan Behrensff023aa2012-11-06 11:43:11 +01003073 struct btrfs_device *extent_dev;
3074 int extent_mirror_num;
Wang Shilong3b080b22014-04-01 18:01:43 +08003075 int stop_loop = 0;
David Woodhouse53b381b2013-01-29 18:40:14 -05003076
Wang Shilong3b080b22014-04-01 18:01:43 +08003077 physical = map->stripes[num].physical;
Arne Jansena2de7332011-03-08 14:14:00 +01003078 offset = 0;
Liu Bo42c61ab2017-04-03 13:45:24 -07003079 nstripes = div64_u64(length, map->stripe_len);
Arne Jansena2de7332011-03-08 14:14:00 +01003080 if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
3081 offset = map->stripe_len * num;
3082 increment = map->stripe_len * map->num_stripes;
Jan Schmidt193ea742011-06-13 19:56:54 +02003083 mirror_num = 1;
Arne Jansena2de7332011-03-08 14:14:00 +01003084 } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
3085 int factor = map->num_stripes / map->sub_stripes;
3086 offset = map->stripe_len * (num / map->sub_stripes);
3087 increment = map->stripe_len * factor;
Jan Schmidt193ea742011-06-13 19:56:54 +02003088 mirror_num = num % map->sub_stripes + 1;
Arne Jansena2de7332011-03-08 14:14:00 +01003089 } else if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
3090 increment = map->stripe_len;
Jan Schmidt193ea742011-06-13 19:56:54 +02003091 mirror_num = num % map->num_stripes + 1;
Arne Jansena2de7332011-03-08 14:14:00 +01003092 } else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
3093 increment = map->stripe_len;
Jan Schmidt193ea742011-06-13 19:56:54 +02003094 mirror_num = num % map->num_stripes + 1;
Zhao Leiffe2d202015-01-20 15:11:44 +08003095 } else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
Miao Xie5a6ac9e2014-11-06 17:20:58 +08003096 get_raid56_logic_offset(physical, num, map, &offset, NULL);
Wang Shilong3b080b22014-04-01 18:01:43 +08003097 increment = map->stripe_len * nr_data_stripes(map);
3098 mirror_num = 1;
Arne Jansena2de7332011-03-08 14:14:00 +01003099 } else {
3100 increment = map->stripe_len;
Jan Schmidt193ea742011-06-13 19:56:54 +02003101 mirror_num = 1;
Arne Jansena2de7332011-03-08 14:14:00 +01003102 }
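
	/*
	 * A sketch of the numbers above for an assumed RAID10 chunk with
	 * num_stripes = 4, sub_stripes = 2 and stripe_len = 64K: factor = 2,
	 * so device num = 3 starts scrubbing at offset = 64K, advances by
	 * increment = 128K per row and reports mirror_num = 2, i.e. the
	 * second copy of each stripe.
	 */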
3103
3104 path = btrfs_alloc_path();
3105 if (!path)
3106 return -ENOMEM;
3107
Miao Xie5a6ac9e2014-11-06 17:20:58 +08003108 ppath = btrfs_alloc_path();
3109 if (!ppath) {
Tsutomu Itoh379d6852015-01-09 17:37:52 +09003110 btrfs_free_path(path);
Miao Xie5a6ac9e2014-11-06 17:20:58 +08003111 return -ENOMEM;
3112 }
3113
Stefan Behrensb5d67f62012-03-27 14:21:27 -04003114 /*
3115 * work on commit root. The related disk blocks are static as
3116	 * long as COW is applied. This means it is safe to rewrite
3117 * them to repair disk errors without any race conditions
3118 */
Arne Jansena2de7332011-03-08 14:14:00 +01003119 path->search_commit_root = 1;
3120 path->skip_locking = 1;
3121
Gui Hecheng063c54d2015-01-09 09:39:40 +08003122 ppath->search_commit_root = 1;
3123 ppath->skip_locking = 1;
Arne Jansena2de7332011-03-08 14:14:00 +01003124 /*
Arne Jansen7a262852011-06-10 12:39:23 +02003125	 * trigger the readahead for the extent tree and csum tree and wait for
3126 * completion. During readahead, the scrub is officially paused
3127 * to not hold off transaction commits
Arne Jansena2de7332011-03-08 14:14:00 +01003128 */
3129 logical = base + offset;
Wang Shilong3b080b22014-04-01 18:01:43 +08003130 physical_end = physical + nstripes * map->stripe_len;
Zhao Leiffe2d202015-01-20 15:11:44 +08003131 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
Wang Shilong3b080b22014-04-01 18:01:43 +08003132 get_raid56_logic_offset(physical_end, num,
Miao Xie5a6ac9e2014-11-06 17:20:58 +08003133 map, &logic_end, NULL);
Wang Shilong3b080b22014-04-01 18:01:43 +08003134 logic_end += base;
3135 } else {
3136 logic_end = logical + increment * nstripes;
3137 }
Stefan Behrensd9d181c2012-11-02 09:58:09 +01003138 wait_event(sctx->list_wait,
Stefan Behrensb6bfebc2012-11-02 16:44:58 +01003139 atomic_read(&sctx->bios_in_flight) == 0);
Wang Shilongcb7ab022013-12-04 21:16:53 +08003140 scrub_blocked_if_needed(fs_info);
Arne Jansena2de7332011-03-08 14:14:00 +01003141
Arne Jansen7a262852011-06-10 12:39:23 +02003142 /* FIXME it might be better to start readahead at commit root */
David Sterbae6c11f92016-03-24 18:00:53 +01003143 key.objectid = logical;
3144 key.type = BTRFS_EXTENT_ITEM_KEY;
3145 key.offset = (u64)0;
Wang Shilong3b080b22014-04-01 18:01:43 +08003146 key_end.objectid = logic_end;
Josef Bacik3173a182013-03-07 14:22:04 -05003147 key_end.type = BTRFS_METADATA_ITEM_KEY;
3148 key_end.offset = (u64)-1;
David Sterbae6c11f92016-03-24 18:00:53 +01003149 reada1 = btrfs_reada_add(root, &key, &key_end);
Arne Jansena2de7332011-03-08 14:14:00 +01003150
David Sterbae6c11f92016-03-24 18:00:53 +01003151 key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
3152 key.type = BTRFS_EXTENT_CSUM_KEY;
3153 key.offset = logical;
Arne Jansen7a262852011-06-10 12:39:23 +02003154 key_end.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
3155 key_end.type = BTRFS_EXTENT_CSUM_KEY;
Wang Shilong3b080b22014-04-01 18:01:43 +08003156 key_end.offset = logic_end;
David Sterbae6c11f92016-03-24 18:00:53 +01003157 reada2 = btrfs_reada_add(csum_root, &key, &key_end);
Arne Jansena2de7332011-03-08 14:14:00 +01003158
Arne Jansen7a262852011-06-10 12:39:23 +02003159 if (!IS_ERR(reada1))
3160 btrfs_reada_wait(reada1);
3161 if (!IS_ERR(reada2))
3162 btrfs_reada_wait(reada2);
Arne Jansena2de7332011-03-08 14:14:00 +01003163
Arne Jansena2de7332011-03-08 14:14:00 +01003164
3165 /*
3166 * collect all data csums for the stripe to avoid seeking during
3167	 * the scrub. This might currently (crc32) end up being about 1MB
3168 */
Arne Jansene7786c32011-05-28 20:58:38 +00003169 blk_start_plug(&plug);
Arne Jansena2de7332011-03-08 14:14:00 +01003170
Arne Jansena2de7332011-03-08 14:14:00 +01003171 /*
3172 * now find all extents for each stripe and scrub them
3173 */
Arne Jansena2de7332011-03-08 14:14:00 +01003174 ret = 0;
Wang Shilong3b080b22014-04-01 18:01:43 +08003175 while (physical < physical_end) {
Arne Jansena2de7332011-03-08 14:14:00 +01003176 /*
3177 * canceled?
3178 */
3179 if (atomic_read(&fs_info->scrub_cancel_req) ||
Stefan Behrensd9d181c2012-11-02 09:58:09 +01003180 atomic_read(&sctx->cancel_req)) {
Arne Jansena2de7332011-03-08 14:14:00 +01003181 ret = -ECANCELED;
3182 goto out;
3183 }
3184 /*
3185 * check to see if we have to pause
3186 */
3187 if (atomic_read(&fs_info->scrub_pause_req)) {
3188 /* push queued extents */
David Sterba2073c4c2017-03-31 17:12:51 +02003189 sctx->flush_all_writes = true;
Stefan Behrensd9d181c2012-11-02 09:58:09 +01003190 scrub_submit(sctx);
David Sterba3fb99302017-05-16 19:10:32 +02003191 mutex_lock(&sctx->wr_lock);
Stefan Behrensff023aa2012-11-06 11:43:11 +01003192 scrub_wr_submit(sctx);
David Sterba3fb99302017-05-16 19:10:32 +02003193 mutex_unlock(&sctx->wr_lock);
Stefan Behrensd9d181c2012-11-02 09:58:09 +01003194 wait_event(sctx->list_wait,
Stefan Behrensb6bfebc2012-11-02 16:44:58 +01003195 atomic_read(&sctx->bios_in_flight) == 0);
David Sterba2073c4c2017-03-31 17:12:51 +02003196 sctx->flush_all_writes = false;
Wang Shilong3cb09292013-12-04 21:15:19 +08003197 scrub_blocked_if_needed(fs_info);
Arne Jansena2de7332011-03-08 14:14:00 +01003198 }
3199
Zhao Leif2f66a22015-07-21 12:22:29 +08003200 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
3201 ret = get_raid56_logic_offset(physical, num, map,
3202 &logical,
3203 &stripe_logical);
3204 logical += base;
3205 if (ret) {
Zhao Lei79553232015-08-18 17:54:30 +08003206				/* it is a parity stripe */
Zhao Leif2f66a22015-07-21 12:22:29 +08003207 stripe_logical += base;
Zhao Leia0dd59d2015-07-21 15:42:26 +08003208 stripe_end = stripe_logical + increment;
Zhao Leif2f66a22015-07-21 12:22:29 +08003209 ret = scrub_raid56_parity(sctx, map, scrub_dev,
3210 ppath, stripe_logical,
3211 stripe_end);
3212 if (ret)
3213 goto out;
3214 goto skip;
3215 }
3216 }
3217
Wang Shilong7c76edb2014-01-12 21:38:32 +08003218 if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
3219 key.type = BTRFS_METADATA_ITEM_KEY;
3220 else
3221 key.type = BTRFS_EXTENT_ITEM_KEY;
Arne Jansena2de7332011-03-08 14:14:00 +01003222 key.objectid = logical;
Liu Bo625f1c8d2013-04-27 02:56:57 +00003223 key.offset = (u64)-1;
Arne Jansena2de7332011-03-08 14:14:00 +01003224
3225 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3226 if (ret < 0)
3227 goto out;
Josef Bacik3173a182013-03-07 14:22:04 -05003228
Arne Jansen8c510322011-06-03 10:09:26 +02003229 if (ret > 0) {
Wang Shilongade2e0b2014-01-12 21:38:33 +08003230 ret = btrfs_previous_extent_item(root, path, 0);
Arne Jansena2de7332011-03-08 14:14:00 +01003231 if (ret < 0)
3232 goto out;
Arne Jansen8c510322011-06-03 10:09:26 +02003233 if (ret > 0) {
3234 /* there's no smaller item, so stick with the
3235 * larger one */
3236 btrfs_release_path(path);
3237 ret = btrfs_search_slot(NULL, root, &key,
3238 path, 0, 0);
3239 if (ret < 0)
3240 goto out;
3241 }
Arne Jansena2de7332011-03-08 14:14:00 +01003242 }
3243
Liu Bo625f1c8d2013-04-27 02:56:57 +00003244 stop_loop = 0;
Arne Jansena2de7332011-03-08 14:14:00 +01003245 while (1) {
Josef Bacik3173a182013-03-07 14:22:04 -05003246 u64 bytes;
3247
Arne Jansena2de7332011-03-08 14:14:00 +01003248 l = path->nodes[0];
3249 slot = path->slots[0];
3250 if (slot >= btrfs_header_nritems(l)) {
3251 ret = btrfs_next_leaf(root, path);
3252 if (ret == 0)
3253 continue;
3254 if (ret < 0)
3255 goto out;
3256
Liu Bo625f1c8d2013-04-27 02:56:57 +00003257 stop_loop = 1;
Arne Jansena2de7332011-03-08 14:14:00 +01003258 break;
3259 }
3260 btrfs_item_key_to_cpu(l, &key, slot);
3261
Zhao Leid7cad232015-07-22 13:14:48 +08003262 if (key.type != BTRFS_EXTENT_ITEM_KEY &&
3263 key.type != BTRFS_METADATA_ITEM_KEY)
3264 goto next;
3265
Josef Bacik3173a182013-03-07 14:22:04 -05003266 if (key.type == BTRFS_METADATA_ITEM_KEY)
Jeff Mahoney0b246af2016-06-22 18:54:23 -04003267 bytes = fs_info->nodesize;
Josef Bacik3173a182013-03-07 14:22:04 -05003268 else
3269 bytes = key.offset;
3270
3271 if (key.objectid + bytes <= logical)
Arne Jansena2de7332011-03-08 14:14:00 +01003272 goto next;
3273
Liu Bo625f1c8d2013-04-27 02:56:57 +00003274 if (key.objectid >= logical + map->stripe_len) {
3275 /* out of this device extent */
3276 if (key.objectid >= logic_end)
3277 stop_loop = 1;
3278 break;
3279 }
Arne Jansena2de7332011-03-08 14:14:00 +01003280
3281 extent = btrfs_item_ptr(l, slot,
3282 struct btrfs_extent_item);
3283 flags = btrfs_extent_flags(l, extent);
3284 generation = btrfs_extent_generation(l, extent);
3285
Zhao Leia323e812015-07-23 12:29:49 +08003286 if ((flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) &&
3287 (key.objectid < logical ||
3288 key.objectid + bytes >
3289 logical + map->stripe_len)) {
Frank Holtonefe120a2013-12-20 11:37:06 -05003290 btrfs_err(fs_info,
Jeff Mahoney5d163e02016-09-20 10:05:00 -04003291 "scrub: tree block %llu spanning stripes, ignored. logical=%llu",
Geert Uytterhoevenc1c9ff72013-08-20 13:20:07 +02003292 key.objectid, logical);
Zhao Lei9799d2c32015-08-25 21:31:40 +08003293 spin_lock(&sctx->stat_lock);
3294 sctx->stat.uncorrectable_errors++;
3295 spin_unlock(&sctx->stat_lock);
Arne Jansena2de7332011-03-08 14:14:00 +01003296 goto next;
3297 }
3298
Liu Bo625f1c8d2013-04-27 02:56:57 +00003299again:
3300 extent_logical = key.objectid;
3301 extent_len = bytes;
3302
Arne Jansena2de7332011-03-08 14:14:00 +01003303 /*
3304 * trim extent to this stripe
3305 */
Liu Bo625f1c8d2013-04-27 02:56:57 +00003306 if (extent_logical < logical) {
3307 extent_len -= logical - extent_logical;
3308 extent_logical = logical;
Arne Jansena2de7332011-03-08 14:14:00 +01003309 }
Liu Bo625f1c8d2013-04-27 02:56:57 +00003310 if (extent_logical + extent_len >
Arne Jansena2de7332011-03-08 14:14:00 +01003311 logical + map->stripe_len) {
Liu Bo625f1c8d2013-04-27 02:56:57 +00003312 extent_len = logical + map->stripe_len -
3313 extent_logical;
Arne Jansena2de7332011-03-08 14:14:00 +01003314 }
3315
Liu Bo625f1c8d2013-04-27 02:56:57 +00003316 extent_physical = extent_logical - logical + physical;
Stefan Behrensff023aa2012-11-06 11:43:11 +01003317 extent_dev = scrub_dev;
3318 extent_mirror_num = mirror_num;
Omar Sandoval32934282018-08-14 11:09:52 -07003319 if (sctx->is_dev_replace)
Stefan Behrensff023aa2012-11-06 11:43:11 +01003320 scrub_remap_extent(fs_info, extent_logical,
3321 extent_len, &extent_physical,
3322 &extent_dev,
3323 &extent_mirror_num);
Liu Bo625f1c8d2013-04-27 02:56:57 +00003324
Zhao Leife8cf652015-07-22 13:14:47 +08003325 ret = btrfs_lookup_csums_range(csum_root,
3326 extent_logical,
3327 extent_logical +
3328 extent_len - 1,
3329 &sctx->csum_list, 1);
Arne Jansena2de7332011-03-08 14:14:00 +01003330 if (ret)
3331 goto out;
3332
Liu Bo6ca17652018-03-07 12:08:09 -07003333 ret = scrub_extent(sctx, map, extent_logical, extent_len,
Liu Bo625f1c8d2013-04-27 02:56:57 +00003334 extent_physical, extent_dev, flags,
3335 generation, extent_mirror_num,
Stefan Behrens115930c2013-07-04 16:14:23 +02003336 extent_logical - logical + physical);
Zhao Lei6fa96d72015-07-21 12:22:30 +08003337
3338 scrub_free_csums(sctx);
3339
Liu Bo625f1c8d2013-04-27 02:56:57 +00003340 if (ret)
3341 goto out;
3342
3343 if (extent_logical + extent_len <
3344 key.objectid + bytes) {
Zhao Leiffe2d202015-01-20 15:11:44 +08003345 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
Wang Shilong3b080b22014-04-01 18:01:43 +08003346 /*
3347 * loop until we find next data stripe
3348 * or we have finished all stripes.
3349 */
Miao Xie5a6ac9e2014-11-06 17:20:58 +08003350loop:
3351 physical += map->stripe_len;
3352 ret = get_raid56_logic_offset(physical,
3353 num, map, &logical,
3354 &stripe_logical);
3355 logical += base;
3356
3357 if (ret && physical < physical_end) {
3358 stripe_logical += base;
3359 stripe_end = stripe_logical +
Zhao Leia0dd59d2015-07-21 15:42:26 +08003360 increment;
Miao Xie5a6ac9e2014-11-06 17:20:58 +08003361 ret = scrub_raid56_parity(sctx,
3362 map, scrub_dev, ppath,
3363 stripe_logical,
3364 stripe_end);
3365 if (ret)
3366 goto out;
3367 goto loop;
3368 }
Wang Shilong3b080b22014-04-01 18:01:43 +08003369 } else {
3370 physical += map->stripe_len;
3371 logical += increment;
3372 }
Liu Bo625f1c8d2013-04-27 02:56:57 +00003373 if (logical < key.objectid + bytes) {
3374 cond_resched();
3375 goto again;
3376 }
3377
Wang Shilong3b080b22014-04-01 18:01:43 +08003378 if (physical >= physical_end) {
Liu Bo625f1c8d2013-04-27 02:56:57 +00003379 stop_loop = 1;
3380 break;
3381 }
3382 }
Arne Jansena2de7332011-03-08 14:14:00 +01003383next:
3384 path->slots[0]++;
3385 }
Chris Mason71267332011-05-23 06:30:52 -04003386 btrfs_release_path(path);
Wang Shilong3b080b22014-04-01 18:01:43 +08003387skip:
Arne Jansena2de7332011-03-08 14:14:00 +01003388 logical += increment;
3389 physical += map->stripe_len;
Stefan Behrensd9d181c2012-11-02 09:58:09 +01003390 spin_lock(&sctx->stat_lock);
Liu Bo625f1c8d2013-04-27 02:56:57 +00003391 if (stop_loop)
3392 sctx->stat.last_physical = map->stripes[num].physical +
3393 length;
3394 else
3395 sctx->stat.last_physical = physical;
Stefan Behrensd9d181c2012-11-02 09:58:09 +01003396 spin_unlock(&sctx->stat_lock);
Liu Bo625f1c8d2013-04-27 02:56:57 +00003397 if (stop_loop)
3398 break;
Arne Jansena2de7332011-03-08 14:14:00 +01003399 }
Stefan Behrensff023aa2012-11-06 11:43:11 +01003400out:
Arne Jansena2de7332011-03-08 14:14:00 +01003401 /* push queued extents */
Stefan Behrensd9d181c2012-11-02 09:58:09 +01003402 scrub_submit(sctx);
David Sterba3fb99302017-05-16 19:10:32 +02003403 mutex_lock(&sctx->wr_lock);
Stefan Behrensff023aa2012-11-06 11:43:11 +01003404 scrub_wr_submit(sctx);
David Sterba3fb99302017-05-16 19:10:32 +02003405 mutex_unlock(&sctx->wr_lock);
Arne Jansena2de7332011-03-08 14:14:00 +01003406
Arne Jansene7786c32011-05-28 20:58:38 +00003407 blk_finish_plug(&plug);
Arne Jansena2de7332011-03-08 14:14:00 +01003408 btrfs_free_path(path);
Miao Xie5a6ac9e2014-11-06 17:20:58 +08003409 btrfs_free_path(ppath);
Arne Jansena2de7332011-03-08 14:14:00 +01003410 return ret < 0 ? ret : 0;
3411}
3412
Stefan Behrensd9d181c2012-11-02 09:58:09 +01003413static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx,
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01003414 struct btrfs_device *scrub_dev,
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01003415 u64 chunk_offset, u64 length,
Filipe Manana020d5b72015-11-19 10:57:20 +00003416 u64 dev_offset,
Omar Sandoval32934282018-08-14 11:09:52 -07003417 struct btrfs_block_group_cache *cache)
Arne Jansena2de7332011-03-08 14:14:00 +01003418{
Jeff Mahoneyfb456252016-06-22 18:54:56 -04003419 struct btrfs_fs_info *fs_info = sctx->fs_info;
3420 struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
Arne Jansena2de7332011-03-08 14:14:00 +01003421 struct map_lookup *map;
3422 struct extent_map *em;
3423 int i;
Stefan Behrensff023aa2012-11-06 11:43:11 +01003424 int ret = 0;
Arne Jansena2de7332011-03-08 14:14:00 +01003425
3426 read_lock(&map_tree->map_tree.lock);
3427 em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
3428 read_unlock(&map_tree->map_tree.lock);
3429
Filipe Manana020d5b72015-11-19 10:57:20 +00003430 if (!em) {
3431 /*
3432 * Might have been an unused block group deleted by the cleaner
3433 * kthread or relocation.
3434 */
3435 spin_lock(&cache->lock);
3436 if (!cache->removed)
3437 ret = -EINVAL;
3438 spin_unlock(&cache->lock);
3439
3440 return ret;
3441 }
Arne Jansena2de7332011-03-08 14:14:00 +01003442
Jeff Mahoney95617d62015-06-03 10:55:48 -04003443 map = em->map_lookup;
Arne Jansena2de7332011-03-08 14:14:00 +01003444 if (em->start != chunk_offset)
3445 goto out;
3446
3447 if (em->len < length)
3448 goto out;
3449
3450 for (i = 0; i < map->num_stripes; ++i) {
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01003451 if (map->stripes[i].dev->bdev == scrub_dev->bdev &&
Arne Jansen859acaf2012-02-09 15:09:02 +01003452 map->stripes[i].physical == dev_offset) {
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01003453 ret = scrub_stripe(sctx, map, scrub_dev, i,
Omar Sandoval32934282018-08-14 11:09:52 -07003454 chunk_offset, length);
Arne Jansena2de7332011-03-08 14:14:00 +01003455 if (ret)
3456 goto out;
3457 }
3458 }
3459out:
3460 free_extent_map(em);
3461
3462 return ret;
3463}
3464
3465static noinline_for_stack
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01003466int scrub_enumerate_chunks(struct scrub_ctx *sctx,
Omar Sandoval32934282018-08-14 11:09:52 -07003467 struct btrfs_device *scrub_dev, u64 start, u64 end)
Arne Jansena2de7332011-03-08 14:14:00 +01003468{
3469 struct btrfs_dev_extent *dev_extent = NULL;
3470 struct btrfs_path *path;
Jeff Mahoney0b246af2016-06-22 18:54:23 -04003471 struct btrfs_fs_info *fs_info = sctx->fs_info;
3472 struct btrfs_root *root = fs_info->dev_root;
Arne Jansena2de7332011-03-08 14:14:00 +01003473 u64 length;
Arne Jansena2de7332011-03-08 14:14:00 +01003474 u64 chunk_offset;
Zhaolei55e3a602015-08-05 16:43:30 +08003475 int ret = 0;
Zhaolei76a8efa2015-11-17 18:46:17 +08003476 int ro_set;
Arne Jansena2de7332011-03-08 14:14:00 +01003477 int slot;
3478 struct extent_buffer *l;
3479 struct btrfs_key key;
3480 struct btrfs_key found_key;
3481 struct btrfs_block_group_cache *cache;
Stefan Behrensff023aa2012-11-06 11:43:11 +01003482 struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
Arne Jansena2de7332011-03-08 14:14:00 +01003483
3484 path = btrfs_alloc_path();
3485 if (!path)
3486 return -ENOMEM;
3487
David Sterbae4058b52015-11-27 16:31:35 +01003488 path->reada = READA_FORWARD;
Arne Jansena2de7332011-03-08 14:14:00 +01003489 path->search_commit_root = 1;
3490 path->skip_locking = 1;
3491
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01003492 key.objectid = scrub_dev->devid;
Arne Jansena2de7332011-03-08 14:14:00 +01003493 key.offset = 0ull;
3494 key.type = BTRFS_DEV_EXTENT_KEY;
3495
Arne Jansena2de7332011-03-08 14:14:00 +01003496 while (1) {
3497 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3498 if (ret < 0)
Arne Jansen8c510322011-06-03 10:09:26 +02003499 break;
3500 if (ret > 0) {
3501 if (path->slots[0] >=
3502 btrfs_header_nritems(path->nodes[0])) {
3503 ret = btrfs_next_leaf(root, path);
Zhaolei55e3a602015-08-05 16:43:30 +08003504 if (ret < 0)
Arne Jansen8c510322011-06-03 10:09:26 +02003505 break;
Zhaolei55e3a602015-08-05 16:43:30 +08003506 if (ret > 0) {
3507 ret = 0;
3508 break;
3509 }
3510 } else {
3511 ret = 0;
Arne Jansen8c510322011-06-03 10:09:26 +02003512 }
3513 }
Arne Jansena2de7332011-03-08 14:14:00 +01003514
3515 l = path->nodes[0];
3516 slot = path->slots[0];
3517
3518 btrfs_item_key_to_cpu(l, &found_key, slot);
3519
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01003520 if (found_key.objectid != scrub_dev->devid)
Arne Jansena2de7332011-03-08 14:14:00 +01003521 break;
3522
David Sterba962a2982014-06-04 18:41:45 +02003523 if (found_key.type != BTRFS_DEV_EXTENT_KEY)
Arne Jansena2de7332011-03-08 14:14:00 +01003524 break;
3525
3526 if (found_key.offset >= end)
3527 break;
3528
3529 if (found_key.offset < key.offset)
3530 break;
3531
3532 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
3533 length = btrfs_dev_extent_length(l, dev_extent);
3534
Qu Wenruoced96ed2014-06-19 10:42:51 +08003535 if (found_key.offset + length <= start)
3536 goto skip;
Arne Jansena2de7332011-03-08 14:14:00 +01003537
Arne Jansena2de7332011-03-08 14:14:00 +01003538 chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
3539
3540 /*
3541 * get a reference on the corresponding block group to prevent
3542 * the chunk from going away while we scrub it
3543 */
3544 cache = btrfs_lookup_block_group(fs_info, chunk_offset);
Qu Wenruoced96ed2014-06-19 10:42:51 +08003545
3546 /* some chunks are removed but not committed to disk yet,
3547 * continue scrubbing */
3548 if (!cache)
3549 goto skip;
3550
Zhaolei55e3a602015-08-05 16:43:30 +08003551 /*
3552		 * we need to call btrfs_inc_block_group_ro() with scrubs_paused,
3553 * to avoid deadlock caused by:
3554 * btrfs_inc_block_group_ro()
3555 * -> btrfs_wait_for_commit()
3556 * -> btrfs_commit_transaction()
3557 * -> btrfs_scrub_pause()
3558 */
3559 scrub_pause_on(fs_info);
Nikolay Borisovc83488a2018-06-20 15:49:14 +03003560 ret = btrfs_inc_block_group_ro(cache);
Omar Sandoval32934282018-08-14 11:09:52 -07003561 if (!ret && sctx->is_dev_replace) {
Filipe Mananaf0e9b7d2016-05-14 09:12:53 +01003562 /*
3563			 * If we are doing a device replace, wait for any tasks
3564			 * that started delalloc right before we set the block
3565 * group to RO mode, as they might have just allocated
3566 * an extent from it or decided they could do a nocow
3567 * write. And if any such tasks did that, wait for their
3568 * ordered extents to complete and then commit the
3569 * current transaction, so that we can later see the new
3570 * extent items in the extent tree - the ordered extents
3571 * create delayed data references (for cow writes) when
3572 * they complete, which will be run and insert the
3573 * corresponding extent items into the extent tree when
3574 * we commit the transaction they used when running
3575 * inode.c:btrfs_finish_ordered_io(). We later use
3576 * the commit root of the extent tree to find extents
3577 * to copy from the srcdev into the tgtdev, and we don't
3578 * want to miss any new extents.
3579 */
3580 btrfs_wait_block_group_reservations(cache);
3581 btrfs_wait_nocow_writers(cache);
Chris Mason6374e57a2017-06-23 09:48:21 -07003582 ret = btrfs_wait_ordered_roots(fs_info, U64_MAX,
Filipe Mananaf0e9b7d2016-05-14 09:12:53 +01003583 cache->key.objectid,
3584 cache->key.offset);
3585 if (ret > 0) {
3586 struct btrfs_trans_handle *trans;
3587
3588 trans = btrfs_join_transaction(root);
3589 if (IS_ERR(trans))
3590 ret = PTR_ERR(trans);
3591 else
Jeff Mahoney3a45bb22016-09-09 21:39:03 -04003592 ret = btrfs_commit_transaction(trans);
Filipe Mananaf0e9b7d2016-05-14 09:12:53 +01003593 if (ret) {
3594 scrub_pause_off(fs_info);
3595 btrfs_put_block_group(cache);
3596 break;
3597 }
3598 }
3599 }
Zhaolei55e3a602015-08-05 16:43:30 +08003600 scrub_pause_off(fs_info);
Zhaolei76a8efa2015-11-17 18:46:17 +08003601
3602 if (ret == 0) {
3603 ro_set = 1;
3604 } else if (ret == -ENOSPC) {
3605 /*
3606 * btrfs_inc_block_group_ro return -ENOSPC when it
3607 * failed in creating new chunk for metadata.
3608 * It is not a problem for scrub/replace, because
3609 * metadata are always cowed, and our scrub paused
3610 * commit_transactions.
3611 */
3612 ro_set = 0;
3613 } else {
Jeff Mahoney5d163e02016-09-20 10:05:00 -04003614 btrfs_warn(fs_info,
David Sterba913e1532017-07-13 15:32:18 +02003615 "failed setting block group ro: %d", ret);
Zhaolei55e3a602015-08-05 16:43:30 +08003616 btrfs_put_block_group(cache);
3617 break;
3618 }
3619
David Sterbacb5583d2018-09-07 16:11:23 +02003620 down_write(&fs_info->dev_replace.rwsem);
Stefan Behrensff023aa2012-11-06 11:43:11 +01003621 dev_replace->cursor_right = found_key.offset + length;
3622 dev_replace->cursor_left = found_key.offset;
3623 dev_replace->item_needs_writeback = 1;
David Sterbacb5583d2018-09-07 16:11:23 +02003624 up_write(&dev_replace->rwsem);
3625
Zhao Lei8c204c92015-08-19 15:02:40 +08003626 ret = scrub_chunk(sctx, scrub_dev, chunk_offset, length,
Omar Sandoval32934282018-08-14 11:09:52 -07003627 found_key.offset, cache);
Stefan Behrensff023aa2012-11-06 11:43:11 +01003628
3629 /*
3630 * flush, submit all pending read and write bios, afterwards
3631 * wait for them.
3632 * Note that in the dev replace case, a read request causes
3633 * write requests that are submitted in the read completion
3634 * worker. Therefore in the current situation, it is required
3635 * that all write requests are flushed, so that all read and
3636 * write requests are really completed when bios_in_flight
3637 * changes to 0.
3638 */
David Sterba2073c4c2017-03-31 17:12:51 +02003639 sctx->flush_all_writes = true;
Stefan Behrensff023aa2012-11-06 11:43:11 +01003640 scrub_submit(sctx);
David Sterba3fb99302017-05-16 19:10:32 +02003641 mutex_lock(&sctx->wr_lock);
Stefan Behrensff023aa2012-11-06 11:43:11 +01003642 scrub_wr_submit(sctx);
David Sterba3fb99302017-05-16 19:10:32 +02003643 mutex_unlock(&sctx->wr_lock);
Stefan Behrensff023aa2012-11-06 11:43:11 +01003644
3645 wait_event(sctx->list_wait,
3646 atomic_read(&sctx->bios_in_flight) == 0);
Zhaoleib708ce92015-08-05 16:43:29 +08003647
3648 scrub_pause_on(fs_info);
Wang Shilong12cf9372014-02-19 19:24:17 +08003649
3650 /*
3651 * must be called before we decrease @scrub_paused.
3652		 * Make sure we don't block transaction commit while
3653		 * we are waiting for pending workers to finish.
3654 */
Stefan Behrensff023aa2012-11-06 11:43:11 +01003655 wait_event(sctx->list_wait,
3656 atomic_read(&sctx->workers_pending) == 0);
David Sterba2073c4c2017-03-31 17:12:51 +02003657 sctx->flush_all_writes = false;
Wang Shilong12cf9372014-02-19 19:24:17 +08003658
Zhaoleib708ce92015-08-05 16:43:29 +08003659 scrub_pause_off(fs_info);
Stefan Behrensff023aa2012-11-06 11:43:11 +01003660
David Sterbacb5583d2018-09-07 16:11:23 +02003661 down_write(&fs_info->dev_replace.rwsem);
Filipe Manana1a1a8b72016-05-14 19:44:40 +01003662 dev_replace->cursor_left = dev_replace->cursor_right;
3663 dev_replace->item_needs_writeback = 1;
David Sterbacb5583d2018-09-07 16:11:23 +02003664 up_write(&fs_info->dev_replace.rwsem);
Filipe Manana1a1a8b72016-05-14 19:44:40 +01003665
Zhaolei76a8efa2015-11-17 18:46:17 +08003666 if (ro_set)
Jeff Mahoney2ff7e612016-06-22 18:54:24 -04003667 btrfs_dec_block_group_ro(cache);
Stefan Behrensff023aa2012-11-06 11:43:11 +01003668
Filipe Manana758f2df2015-11-19 11:45:48 +00003669 /*
3670 * We might have prevented the cleaner kthread from deleting
3671 * this block group if it was already unused because we raced
3672 * and set it to RO mode first. So add it back to the unused
3673 * list, otherwise it might not ever be deleted unless a manual
3674 * balance is triggered or it becomes used and unused again.
3675 */
3676 spin_lock(&cache->lock);
3677 if (!cache->removed && !cache->ro && cache->reserved == 0 &&
3678 btrfs_block_group_used(&cache->item) == 0) {
3679 spin_unlock(&cache->lock);
Qu Wenruo031f24d2018-05-22 16:43:47 +08003680 btrfs_mark_bg_unused(cache);
Filipe Manana758f2df2015-11-19 11:45:48 +00003681 } else {
3682 spin_unlock(&cache->lock);
3683 }
3684
Arne Jansena2de7332011-03-08 14:14:00 +01003685 btrfs_put_block_group(cache);
3686 if (ret)
3687 break;
Omar Sandoval32934282018-08-14 11:09:52 -07003688 if (sctx->is_dev_replace &&
Stefan Behrensaf1be4f2012-11-27 17:39:51 +00003689 atomic64_read(&dev_replace->num_write_errors) > 0) {
Stefan Behrensff023aa2012-11-06 11:43:11 +01003690 ret = -EIO;
3691 break;
3692 }
3693 if (sctx->stat.malloc_errors > 0) {
3694 ret = -ENOMEM;
3695 break;
3696 }
Qu Wenruoced96ed2014-06-19 10:42:51 +08003697skip:
Arne Jansena2de7332011-03-08 14:14:00 +01003698 key.offset = found_key.offset + length;
Chris Mason71267332011-05-23 06:30:52 -04003699 btrfs_release_path(path);
Arne Jansena2de7332011-03-08 14:14:00 +01003700 }
3701
Arne Jansena2de7332011-03-08 14:14:00 +01003702 btrfs_free_path(path);
Arne Jansen8c510322011-06-03 10:09:26 +02003703
Zhaolei55e3a602015-08-05 16:43:30 +08003704 return ret;
Arne Jansena2de7332011-03-08 14:14:00 +01003705}
3706
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01003707static noinline_for_stack int scrub_supers(struct scrub_ctx *sctx,
3708 struct btrfs_device *scrub_dev)
Arne Jansena2de7332011-03-08 14:14:00 +01003709{
3710 int i;
3711 u64 bytenr;
3712 u64 gen;
3713 int ret;
Jeff Mahoney0b246af2016-06-22 18:54:23 -04003714 struct btrfs_fs_info *fs_info = sctx->fs_info;
Arne Jansena2de7332011-03-08 14:14:00 +01003715
Jeff Mahoney0b246af2016-06-22 18:54:23 -04003716 if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
Jeff Mahoney79787ea2012-03-12 16:03:00 +01003717 return -EIO;
3718
Miao Xie5f546062014-07-24 11:37:09 +08003719	/* Seed devices of a new filesystem have their own generation. */
Jeff Mahoney0b246af2016-06-22 18:54:23 -04003720 if (scrub_dev->fs_devices != fs_info->fs_devices)
Miao Xie5f546062014-07-24 11:37:09 +08003721 gen = scrub_dev->generation;
3722 else
Jeff Mahoney0b246af2016-06-22 18:54:23 -04003723 gen = fs_info->last_trans_committed;
Arne Jansena2de7332011-03-08 14:14:00 +01003724
3725 for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
3726 bytenr = btrfs_sb_offset(i);
Miao Xie935e5cc2014-09-03 21:35:33 +08003727 if (bytenr + BTRFS_SUPER_INFO_SIZE >
3728 scrub_dev->commit_total_bytes)
Arne Jansena2de7332011-03-08 14:14:00 +01003729 break;
3730
Stefan Behrensd9d181c2012-11-02 09:58:09 +01003731 ret = scrub_pages(sctx, bytenr, BTRFS_SUPER_INFO_SIZE, bytenr,
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01003732 scrub_dev, BTRFS_EXTENT_FLAG_SUPER, gen, i,
Stefan Behrensff023aa2012-11-06 11:43:11 +01003733 NULL, 1, bytenr);
Arne Jansena2de7332011-03-08 14:14:00 +01003734 if (ret)
3735 return ret;
3736 }
Stefan Behrensb6bfebc2012-11-02 16:44:58 +01003737 wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);
Arne Jansena2de7332011-03-08 14:14:00 +01003738
3739 return 0;
3740}
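
/*
 * For reference: btrfs_sb_offset() places the BTRFS_SUPER_MIRROR_MAX
 * (3) superblock copies at fixed offsets 64K, 64M and 256G, so the
 * bounds check above simply skips copies that would fall beyond the
 * committed size of a small device.
 */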
3741
3742/*
3743 * Get a reference count on fs_info->scrub_workers. Start the workers if necessary.
3744 */
static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info,
						int is_dev_replace)
{
	unsigned int flags = WQ_FREEZABLE | WQ_UNBOUND;
	int max_active = fs_info->thread_pool_size;

	if (fs_info->scrub_workers_refcnt == 0) {
		fs_info->scrub_workers = btrfs_alloc_workqueue(fs_info, "scrub",
				flags, is_dev_replace ? 1 : max_active, 4);
		if (!fs_info->scrub_workers)
			goto fail_scrub_workers;

		fs_info->scrub_wr_completion_workers =
			btrfs_alloc_workqueue(fs_info, "scrubwrc", flags,
					      max_active, 2);
		if (!fs_info->scrub_wr_completion_workers)
			goto fail_scrub_wr_completion_workers;

		fs_info->scrub_parity_workers =
			btrfs_alloc_workqueue(fs_info, "scrubparity", flags,
					      max_active, 2);
		if (!fs_info->scrub_parity_workers)
			goto fail_scrub_parity_workers;
	}
	++fs_info->scrub_workers_refcnt;
	return 0;

fail_scrub_parity_workers:
	btrfs_destroy_workqueue(fs_info->scrub_wr_completion_workers);
fail_scrub_wr_completion_workers:
	btrfs_destroy_workqueue(fs_info->scrub_workers);
fail_scrub_workers:
	return -ENOMEM;
}

static noinline_for_stack void scrub_workers_put(struct btrfs_fs_info *fs_info)
{
	if (--fs_info->scrub_workers_refcnt == 0) {
		btrfs_destroy_workqueue(fs_info->scrub_workers);
		btrfs_destroy_workqueue(fs_info->scrub_wr_completion_workers);
		btrfs_destroy_workqueue(fs_info->scrub_parity_workers);
	}
	WARN_ON(fs_info->scrub_workers_refcnt < 0);
}

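/*
 * Entry point for scrub (and, with is_dev_replace set, for the read side
 * of device replace).  For reference, the scrub ioctl path invokes it
 * roughly like this (a sketch; argument setup and teardown elided):
 *
 *	ret = btrfs_scrub_dev(fs_info, sa->devid, sa->start, sa->end,
 *			      &sa->progress,
 *			      sa->flags & BTRFS_SCRUB_READONLY, 0);
 */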
int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
		    u64 end, struct btrfs_scrub_progress *progress,
		    int readonly, int is_dev_replace)
{
	struct scrub_ctx *sctx;
	int ret;
	struct btrfs_device *dev;
	unsigned int nofs_flag;

	if (btrfs_fs_closing(fs_info))
		return -EINVAL;

	if (fs_info->nodesize > BTRFS_STRIPE_LEN) {
		/*
		 * In this case scrub is unable to calculate the checksums
		 * the way it is implemented.  Do not handle this situation
		 * at all because it won't ever happen.
		 */
		btrfs_err(fs_info,
			  "scrub: size assumption nodesize <= BTRFS_STRIPE_LEN (%d <= %d) fails",
			  fs_info->nodesize, BTRFS_STRIPE_LEN);
		return -EINVAL;
	}

	if (fs_info->sectorsize != PAGE_SIZE) {
		/* Not supported for data without checksums. */
		btrfs_err_rl(fs_info,
			     "scrub: size assumption sectorsize != PAGE_SIZE (%d != %lu) fails",
			     fs_info->sectorsize, PAGE_SIZE);
		return -EINVAL;
	}

	if (fs_info->nodesize >
	    PAGE_SIZE * SCRUB_MAX_PAGES_PER_BLOCK ||
	    fs_info->sectorsize > PAGE_SIZE * SCRUB_MAX_PAGES_PER_BLOCK) {
		/*
		 * Would exhaust the array bounds of the pagev member in
		 * struct scrub_block.
		 */
		btrfs_err(fs_info,
			  "scrub: size assumption nodesize and sectorsize <= SCRUB_MAX_PAGES_PER_BLOCK (%d <= %d && %d <= %d) fails",
			  fs_info->nodesize, SCRUB_MAX_PAGES_PER_BLOCK,
			  fs_info->sectorsize, SCRUB_MAX_PAGES_PER_BLOCK);
		return -EINVAL;
	}

	mutex_lock(&fs_info->fs_devices->device_list_mutex);
	dev = btrfs_find_device(fs_info, devid, NULL, NULL);
	if (!dev || (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state) &&
		     !is_dev_replace)) {
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		return -ENODEV;
	}

	if (!is_dev_replace && !readonly &&
	    !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state)) {
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		btrfs_err_in_rcu(fs_info, "scrub: device %s is not writable",
				 rcu_str_deref(dev->name));
		return -EROFS;
	}

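	/*
	 * Lock nesting from here on: device_list_mutex (still held from the
	 * device lookup above) -> scrub_lock -> dev_replace.rwsem.
	 */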
	mutex_lock(&fs_info->scrub_lock);
	if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) ||
	    test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &dev->dev_state)) {
		mutex_unlock(&fs_info->scrub_lock);
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		return -EIO;
	}

	down_read(&fs_info->dev_replace.rwsem);
	if (dev->scrub_ctx ||
	    (!is_dev_replace &&
	     btrfs_dev_replace_is_ongoing(&fs_info->dev_replace))) {
		up_read(&fs_info->dev_replace.rwsem);
		mutex_unlock(&fs_info->scrub_lock);
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		return -EINPROGRESS;
	}
	up_read(&fs_info->dev_replace.rwsem);

	ret = scrub_workers_get(fs_info, is_dev_replace);
	if (ret) {
		mutex_unlock(&fs_info->scrub_lock);
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		return ret;
	}

	sctx = scrub_setup_ctx(dev, is_dev_replace);
	if (IS_ERR(sctx)) {
		mutex_unlock(&fs_info->scrub_lock);
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		scrub_workers_put(fs_info);
		return PTR_ERR(sctx);
	}
	sctx->readonly = readonly;
	dev->scrub_ctx = sctx;
	mutex_unlock(&fs_info->fs_devices->device_list_mutex);

	/*
	 * By checking @scrub_pause_req here we can avoid a race between
	 * committing the transaction and scrubbing.
	 */
	__scrub_blocked_if_needed(fs_info);
	atomic_inc(&fs_info->scrubs_running);
	mutex_unlock(&fs_info->scrub_lock);

	/*
	 * In order to avoid deadlock with reclaim when there is a transaction
	 * trying to pause scrub, make sure we use GFP_NOFS for all the
	 * allocations done at btrfs_scrub_pages() and scrub_pages_for_parity()
	 * invoked by our callees. The pausing request is done when the
	 * transaction commit starts, and it blocks the transaction until scrub
	 * is paused (done at specific points at scrub_stripe() or right above
	 * before incrementing fs_info->scrubs_running).
	 */
	nofs_flag = memalloc_nofs_save();
	if (!is_dev_replace) {
		/*
		 * Holding the device list mutex here serializes us against
		 * super block writes kicked off by the log tree sync.
		 */
		mutex_lock(&fs_info->fs_devices->device_list_mutex);
		ret = scrub_supers(sctx, dev);
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
	}

	if (!ret)
		ret = scrub_enumerate_chunks(sctx, dev, start, end);
	memalloc_nofs_restore(nofs_flag);

	wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);
	atomic_dec(&fs_info->scrubs_running);
	wake_up(&fs_info->scrub_pause_wait);

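	/*
	 * Worker items scheduled by the just-finished bios (e.g. for error
	 * fixups) may still be pending; wait for them too before reading
	 * the final statistics.
	 */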
	wait_event(sctx->list_wait, atomic_read(&sctx->workers_pending) == 0);

	if (progress)
		memcpy(progress, &sctx->stat, sizeof(*progress));

	mutex_lock(&fs_info->scrub_lock);
	dev->scrub_ctx = NULL;
	scrub_workers_put(fs_info);
	mutex_unlock(&fs_info->scrub_lock);

	scrub_put_ctx(sctx);

	return ret;
}

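/*
 * Pause all running scrubs and wait until they have actually paused.
 * Typically paired with btrfs_scrub_continue() around the critical part
 * of a transaction commit, roughly:
 *
 *	btrfs_scrub_pause(fs_info);
 *	... work that must not race with scrub ...
 *	btrfs_scrub_continue(fs_info);
 */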
void btrfs_scrub_pause(struct btrfs_fs_info *fs_info)
{
	mutex_lock(&fs_info->scrub_lock);
	atomic_inc(&fs_info->scrub_pause_req);
	while (atomic_read(&fs_info->scrubs_paused) !=
	       atomic_read(&fs_info->scrubs_running)) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
			   atomic_read(&fs_info->scrubs_paused) ==
			   atomic_read(&fs_info->scrubs_running));
		mutex_lock(&fs_info->scrub_lock);
	}
	mutex_unlock(&fs_info->scrub_lock);
}

void btrfs_scrub_continue(struct btrfs_fs_info *fs_info)
{
	atomic_dec(&fs_info->scrub_pause_req);
	wake_up(&fs_info->scrub_pause_wait);
}

int btrfs_scrub_cancel(struct btrfs_fs_info *fs_info)
{
	mutex_lock(&fs_info->scrub_lock);
	if (!atomic_read(&fs_info->scrubs_running)) {
		mutex_unlock(&fs_info->scrub_lock);
		return -ENOTCONN;
	}

	atomic_inc(&fs_info->scrub_cancel_req);
	while (atomic_read(&fs_info->scrubs_running)) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
			   atomic_read(&fs_info->scrubs_running) == 0);
		mutex_lock(&fs_info->scrub_lock);
	}
	atomic_dec(&fs_info->scrub_cancel_req);
	mutex_unlock(&fs_info->scrub_lock);

	return 0;
}

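/* Cancel the scrub running on a single device and wait until it has exited. */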
int btrfs_scrub_cancel_dev(struct btrfs_fs_info *fs_info,
			   struct btrfs_device *dev)
{
	struct scrub_ctx *sctx;

	mutex_lock(&fs_info->scrub_lock);
	sctx = dev->scrub_ctx;
	if (!sctx) {
		mutex_unlock(&fs_info->scrub_lock);
		return -ENOTCONN;
	}
	atomic_inc(&sctx->cancel_req);
	while (dev->scrub_ctx) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
			   dev->scrub_ctx == NULL);
		mutex_lock(&fs_info->scrub_lock);
	}
	mutex_unlock(&fs_info->scrub_lock);

	return 0;
}

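/*
 * Copy the current scrub statistics for @devid into @progress.  Returns
 * -ENODEV if the device cannot be found and -ENOTCONN if no scrub is
 * running on it.
 */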
int btrfs_scrub_progress(struct btrfs_fs_info *fs_info, u64 devid,
			 struct btrfs_scrub_progress *progress)
{
	struct btrfs_device *dev;
	struct scrub_ctx *sctx = NULL;

	mutex_lock(&fs_info->fs_devices->device_list_mutex);
	dev = btrfs_find_device(fs_info, devid, NULL, NULL);
	if (dev)
		sctx = dev->scrub_ctx;
	if (sctx)
		memcpy(progress, &sctx->stat, sizeof(*progress));
	mutex_unlock(&fs_info->fs_devices->device_list_mutex);

	return dev ? (sctx ? 0 : -ENOTCONN) : -ENODEV;
}

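/*
 * For dev-replace: map @extent_logical to the physical offset, device and
 * mirror number of the first stripe.  On any mapping failure the output
 * parameters are left untouched.
 */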
static void scrub_remap_extent(struct btrfs_fs_info *fs_info,
			       u64 extent_logical, u64 extent_len,
			       u64 *extent_physical,
			       struct btrfs_device **extent_dev,
			       int *extent_mirror_num)
{
	u64 mapped_length;
	struct btrfs_bio *bbio = NULL;
	int ret;

	mapped_length = extent_len;
	ret = btrfs_map_block(fs_info, BTRFS_MAP_READ, extent_logical,
			      &mapped_length, &bbio, 0);
	if (ret || !bbio || mapped_length < extent_len ||
	    !bbio->stripes[0].dev->bdev) {
		btrfs_put_bbio(bbio);
		return;
	}

	*extent_physical = bbio->stripes[0].physical;
	*extent_mirror_num = bbio->mirror_num;
	*extent_dev = bbio->stripes[0].dev;
	btrfs_put_bbio(bbio);
}