// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2011, 2012 STRATO. All rights reserved.
 */

#include <linux/blkdev.h>
#include <linux/ratelimit.h>
#include <linux/sched/mm.h>
#include "ctree.h"
#include "volumes.h"
#include "disk-io.h"
#include "ordered-data.h"
#include "transaction.h"
#include "backref.h"
#include "extent_io.h"
#include "dev-replace.h"
#include "check-integrity.h"
#include "rcu-string.h"
#include "raid56.h"

/*
 * This is only the first step towards a full-featured scrub. It reads all
 * extents and super blocks and verifies the checksums. In case a bad checksum
 * is found or the extent cannot be read, good data will be written back if
 * any can be found.
 *
 * Future enhancements:
 *  - In case an unrepairable extent is encountered, track which files are
 *    affected and report them
 *  - track and record media errors, throw out bad devices
 *  - add a mode to also read unallocated space
 */

struct scrub_block;
struct scrub_ctx;

/*
 * The following three values only influence the performance.
 * The last one configures the number of parallel and outstanding I/O
 * operations. The first two values configure an upper limit for the number
 * of (dynamically allocated) pages that are added to a bio.
 */
#define SCRUB_PAGES_PER_RD_BIO	32	/* 128k per bio */
#define SCRUB_PAGES_PER_WR_BIO	32	/* 128k per bio */
#define SCRUB_BIOS_PER_SCTX	64	/* 8MB per device in flight */
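
/*
 * Illustrative arithmetic (assuming 4 KiB pages): 32 pages * 4 KiB = 128 KiB
 * per bio, and 64 scrub_bios per scrub context * 128 KiB = 8 MiB of read I/O
 * that can be in flight per device.
 */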

/*
 * The following value times PAGE_SIZE needs to be large enough to match the
 * largest node/leaf/sector size that shall be supported.
 * Values larger than BTRFS_STRIPE_LEN are not supported.
 */
#define SCRUB_MAX_PAGES_PER_BLOCK	16	/* 64k per node/leaf/sector */

struct scrub_recover {
	refcount_t		refs;
	struct btrfs_bio	*bbio;
	u64			map_length;
};

struct scrub_page {
	struct scrub_block	*sblock;
	struct page		*page;
	struct btrfs_device	*dev;
	struct list_head	list;
	u64			flags;  /* extent flags */
	u64			generation;
	u64			logical;
	u64			physical;
	u64			physical_for_dev_replace;
	atomic_t		refs;
	struct {
		unsigned int	mirror_num:8;
		unsigned int	have_csum:1;
		unsigned int	io_error:1;
	};
	u8			csum[BTRFS_CSUM_SIZE];

	struct scrub_recover	*recover;
};

struct scrub_bio {
	int			index;
	struct scrub_ctx	*sctx;
	struct btrfs_device	*dev;
	struct bio		*bio;
	blk_status_t		status;
	u64			logical;
	u64			physical;
#if SCRUB_PAGES_PER_WR_BIO >= SCRUB_PAGES_PER_RD_BIO
	struct scrub_page	*pagev[SCRUB_PAGES_PER_WR_BIO];
#else
	struct scrub_page	*pagev[SCRUB_PAGES_PER_RD_BIO];
#endif
	int			page_count;
	int			next_free;
	struct btrfs_work	work;
};

struct scrub_block {
	struct scrub_page	*pagev[SCRUB_MAX_PAGES_PER_BLOCK];
	int			page_count;
	atomic_t		outstanding_pages;
	refcount_t		refs; /* free mem on transition to zero */
	struct scrub_ctx	*sctx;
	struct scrub_parity	*sparity;
	struct {
		unsigned int	header_error:1;
		unsigned int	checksum_error:1;
		unsigned int	no_io_error_seen:1;
		unsigned int	generation_error:1; /* also sets header_error */

		/* The following is for the data used to check parity */
		/* It is for the data with checksum */
		unsigned int	data_corrected:1;
	};
	struct btrfs_work	work;
};

/* Used for the chunks with parity stripe such as RAID5/6 */
struct scrub_parity {
	struct scrub_ctx	*sctx;

	struct btrfs_device	*scrub_dev;

	u64			logic_start;

	u64			logic_end;

	int			nsectors;

	u64			stripe_len;

	refcount_t		refs;

	struct list_head	spages;

	/* Work of parity check and repair */
	struct btrfs_work	work;

	/* Mark the parity blocks which have data */
	unsigned long		*dbitmap;

	/*
	 * Mark the parity blocks which have data, but errors happened when
	 * reading or checking the data
	 */
	unsigned long		*ebitmap;

	unsigned long		bitmap[0];
};
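
/*
 * Note: dbitmap and ebitmap are expected to point into the trailing
 * bitmap[0] storage, which is assumed to be allocated large enough for two
 * nsectors-bit bitmaps laid out back to back.
 */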

struct scrub_ctx {
	struct scrub_bio	*bios[SCRUB_BIOS_PER_SCTX];
	struct btrfs_fs_info	*fs_info;
	int			first_free;
	int			curr;
	atomic_t		bios_in_flight;
	atomic_t		workers_pending;
	spinlock_t		list_lock;
	wait_queue_head_t	list_wait;
	u16			csum_size;
	struct list_head	csum_list;
	atomic_t		cancel_req;
	int			readonly;
	int			pages_per_rd_bio;

	int			is_dev_replace;

	struct scrub_bio	*wr_curr_bio;
	struct mutex		wr_lock;
	int			pages_per_wr_bio; /* <= SCRUB_PAGES_PER_WR_BIO */
	struct btrfs_device	*wr_tgtdev;
	bool			flush_all_writes;

	/*
	 * statistics
	 */
	struct btrfs_scrub_progress stat;
	spinlock_t		stat_lock;

	/*
	 * Use a ref counter to avoid use-after-free issues. Scrub workers
	 * decrement bios_in_flight and workers_pending and then do a wakeup
	 * on the list_wait wait queue. We must ensure the main scrub task
	 * doesn't free the scrub context before or while the workers are
	 * doing the wakeup() call.
	 */
	refcount_t		refs;
};

struct scrub_warning {
	struct btrfs_path	*path;
	u64			extent_item_size;
	const char		*errstr;
	u64			physical;
	u64			logical;
	struct btrfs_device	*dev;
};

struct full_stripe_lock {
	struct rb_node node;
	u64 logical;
	u64 refs;
	struct mutex mutex;
};

static void scrub_pending_bio_inc(struct scrub_ctx *sctx);
static void scrub_pending_bio_dec(struct scrub_ctx *sctx);
static int scrub_handle_errored_block(struct scrub_block *sblock_to_check);
static int scrub_setup_recheck_block(struct scrub_block *original_sblock,
				     struct scrub_block *sblocks_for_recheck);
static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
				struct scrub_block *sblock,
				int retry_failed_mirror);
static void scrub_recheck_block_checksum(struct scrub_block *sblock);
static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
					     struct scrub_block *sblock_good);
static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
					    struct scrub_block *sblock_good,
					    int page_num, int force_write);
static void scrub_write_block_to_dev_replace(struct scrub_block *sblock);
static int scrub_write_page_to_dev_replace(struct scrub_block *sblock,
					   int page_num);
static int scrub_checksum_data(struct scrub_block *sblock);
static int scrub_checksum_tree_block(struct scrub_block *sblock);
static int scrub_checksum_super(struct scrub_block *sblock);
static void scrub_block_get(struct scrub_block *sblock);
static void scrub_block_put(struct scrub_block *sblock);
static void scrub_page_get(struct scrub_page *spage);
static void scrub_page_put(struct scrub_page *spage);
static void scrub_parity_get(struct scrub_parity *sparity);
static void scrub_parity_put(struct scrub_parity *sparity);
static int scrub_add_page_to_rd_bio(struct scrub_ctx *sctx,
				    struct scrub_page *spage);
static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
		       u64 physical, struct btrfs_device *dev, u64 flags,
		       u64 gen, int mirror_num, u8 *csum, int force,
		       u64 physical_for_dev_replace);
static void scrub_bio_end_io(struct bio *bio);
static void scrub_bio_end_io_worker(struct btrfs_work *work);
static void scrub_block_complete(struct scrub_block *sblock);
static void scrub_remap_extent(struct btrfs_fs_info *fs_info,
			       u64 extent_logical, u64 extent_len,
			       u64 *extent_physical,
			       struct btrfs_device **extent_dev,
			       int *extent_mirror_num);
static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
				    struct scrub_page *spage);
static void scrub_wr_submit(struct scrub_ctx *sctx);
static void scrub_wr_bio_end_io(struct bio *bio);
static void scrub_wr_bio_end_io_worker(struct btrfs_work *work);
static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info);
static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info);
static void scrub_put_ctx(struct scrub_ctx *sctx);

static inline int scrub_is_page_on_raid56(struct scrub_page *page)
{
	return page->recover &&
	       (page->recover->bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK);
}

static void scrub_pending_bio_inc(struct scrub_ctx *sctx)
{
	refcount_inc(&sctx->refs);
	atomic_inc(&sctx->bios_in_flight);
}

static void scrub_pending_bio_dec(struct scrub_ctx *sctx)
{
	atomic_dec(&sctx->bios_in_flight);
	wake_up(&sctx->list_wait);
	scrub_put_ctx(sctx);
}

static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
{
	while (atomic_read(&fs_info->scrub_pause_req)) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
			   atomic_read(&fs_info->scrub_pause_req) == 0);
		mutex_lock(&fs_info->scrub_lock);
	}
}

static void scrub_pause_on(struct btrfs_fs_info *fs_info)
{
	atomic_inc(&fs_info->scrubs_paused);
	wake_up(&fs_info->scrub_pause_wait);
}

static void scrub_pause_off(struct btrfs_fs_info *fs_info)
{
	mutex_lock(&fs_info->scrub_lock);
	__scrub_blocked_if_needed(fs_info);
	atomic_dec(&fs_info->scrubs_paused);
	mutex_unlock(&fs_info->scrub_lock);

	wake_up(&fs_info->scrub_pause_wait);
}

static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
{
	scrub_pause_on(fs_info);
	scrub_pause_off(fs_info);
}

/*
 * Insert new full stripe lock into full stripe locks tree
 *
 * Return pointer to existing or newly inserted full_stripe_lock structure if
 * everything works well.
 * Return ERR_PTR(-ENOMEM) if we failed to allocate memory
 *
 * NOTE: caller must hold full_stripe_locks_root->lock before calling this
 * function
 */
static struct full_stripe_lock *insert_full_stripe_lock(
		struct btrfs_full_stripe_locks_tree *locks_root,
		u64 fstripe_logical)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct full_stripe_lock *entry;
	struct full_stripe_lock *ret;

	lockdep_assert_held(&locks_root->lock);

	p = &locks_root->root.rb_node;
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct full_stripe_lock, node);
		if (fstripe_logical < entry->logical) {
			p = &(*p)->rb_left;
		} else if (fstripe_logical > entry->logical) {
			p = &(*p)->rb_right;
		} else {
			entry->refs++;
			return entry;
		}
	}

	/* Insert new lock */
	ret = kmalloc(sizeof(*ret), GFP_KERNEL);
	if (!ret)
		return ERR_PTR(-ENOMEM);
	ret->logical = fstripe_logical;
	ret->refs = 1;
	mutex_init(&ret->mutex);

	rb_link_node(&ret->node, parent, p);
	rb_insert_color(&ret->node, &locks_root->root);
	return ret;
}
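
/*
 * Illustrative scenario for the refs handling above: if two threads lock
 * the same full stripe, the first call inserts a node with refs == 1, the
 * second finds the existing node and bumps refs to 2; both callers then
 * serialize on entry->mutex in lock_full_stripe() below.
 */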

/*
 * Search for a full stripe lock of a block group
 *
 * Return pointer to existing full stripe lock if found
 * Return NULL if not found
 */
static struct full_stripe_lock *search_full_stripe_lock(
		struct btrfs_full_stripe_locks_tree *locks_root,
		u64 fstripe_logical)
{
	struct rb_node *node;
	struct full_stripe_lock *entry;

	lockdep_assert_held(&locks_root->lock);

	node = locks_root->root.rb_node;
	while (node) {
		entry = rb_entry(node, struct full_stripe_lock, node);
		if (fstripe_logical < entry->logical)
			node = node->rb_left;
		else if (fstripe_logical > entry->logical)
			node = node->rb_right;
		else
			return entry;
	}
	return NULL;
}

/*
 * Helper to get full stripe logical from a normal bytenr.
 *
 * Caller must ensure @cache is a RAID56 block group.
 */
static u64 get_full_stripe_logical(struct btrfs_block_group_cache *cache,
				   u64 bytenr)
{
	u64 ret;

	/*
	 * Due to chunk item size limit, full stripe length should not be
	 * larger than U32_MAX. Just a sanity check here.
	 */
	WARN_ON_ONCE(cache->full_stripe_len >= U32_MAX);

	/*
	 * round_down() can only handle powers of 2, while a RAID56 full
	 * stripe length can be 64KiB * n, so we need to round down manually.
	 */
	ret = div64_u64(bytenr - cache->key.objectid, cache->full_stripe_len) *
			cache->full_stripe_len + cache->key.objectid;
	return ret;
}
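
/*
 * Worked example with hypothetical numbers: for a RAID56 block group
 * starting at cache->key.objectid = 1 GiB with full_stripe_len = 192 KiB
 * (three 64 KiB data stripes), bytenr = 1 GiB + 500 KiB yields
 * div64_u64(500 KiB, 192 KiB) = 2, so the returned full stripe start is
 * 1 GiB + 2 * 192 KiB = 1 GiB + 384 KiB. A plain round_down() would be
 * wrong here because 192 KiB is not a power of 2.
 */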

/*
 * Lock a full stripe to avoid concurrency of recovery and read
 *
 * It's only used for profiles with parities (RAID5/6), for other profiles it
 * does nothing.
 *
 * Return 0 if we locked the full stripe covering @bytenr, with a mutex held.
 * The caller must then call unlock_full_stripe() in the same context.
 *
 * Return <0 if an error is encountered.
 */
static int lock_full_stripe(struct btrfs_fs_info *fs_info, u64 bytenr,
			    bool *locked_ret)
{
	struct btrfs_block_group_cache *bg_cache;
	struct btrfs_full_stripe_locks_tree *locks_root;
	struct full_stripe_lock *existing;
	u64 fstripe_start;
	int ret = 0;

	*locked_ret = false;
	bg_cache = btrfs_lookup_block_group(fs_info, bytenr);
	if (!bg_cache) {
		ASSERT(0);
		return -ENOENT;
	}

	/* Profiles not based on parity don't need full stripe lock */
	if (!(bg_cache->flags & BTRFS_BLOCK_GROUP_RAID56_MASK))
		goto out;
	locks_root = &bg_cache->full_stripe_locks_root;

	fstripe_start = get_full_stripe_logical(bg_cache, bytenr);

	/* Now insert the full stripe lock */
	mutex_lock(&locks_root->lock);
	existing = insert_full_stripe_lock(locks_root, fstripe_start);
	mutex_unlock(&locks_root->lock);
	if (IS_ERR(existing)) {
		ret = PTR_ERR(existing);
		goto out;
	}
	mutex_lock(&existing->mutex);
	*locked_ret = true;
out:
	btrfs_put_block_group(bg_cache);
	return ret;
}

/*
 * Unlock a full stripe.
 *
 * NOTE: Caller must ensure it's the same context that called the
 * corresponding lock_full_stripe().
 *
 * Return 0 if we unlocked the full stripe without problem.
 * Return <0 on error.
 */
static int unlock_full_stripe(struct btrfs_fs_info *fs_info, u64 bytenr,
			      bool locked)
{
	struct btrfs_block_group_cache *bg_cache;
	struct btrfs_full_stripe_locks_tree *locks_root;
	struct full_stripe_lock *fstripe_lock;
	u64 fstripe_start;
	bool freeit = false;
	int ret = 0;

	/* If we didn't acquire the full stripe lock, no need to continue */
	if (!locked)
		return 0;

	bg_cache = btrfs_lookup_block_group(fs_info, bytenr);
	if (!bg_cache) {
		ASSERT(0);
		return -ENOENT;
	}
	if (!(bg_cache->flags & BTRFS_BLOCK_GROUP_RAID56_MASK))
		goto out;

	locks_root = &bg_cache->full_stripe_locks_root;
	fstripe_start = get_full_stripe_logical(bg_cache, bytenr);

	mutex_lock(&locks_root->lock);
	fstripe_lock = search_full_stripe_lock(locks_root, fstripe_start);
	/* Unpaired unlock_full_stripe() detected */
	if (!fstripe_lock) {
		WARN_ON(1);
		ret = -ENOENT;
		mutex_unlock(&locks_root->lock);
		goto out;
	}

	if (fstripe_lock->refs == 0) {
		WARN_ON(1);
		btrfs_warn(fs_info, "full stripe lock at %llu refcount underflow",
			   fstripe_lock->logical);
	} else {
		fstripe_lock->refs--;
	}

	if (fstripe_lock->refs == 0) {
		rb_erase(&fstripe_lock->node, &locks_root->root);
		freeit = true;
	}
	mutex_unlock(&locks_root->lock);

	mutex_unlock(&fstripe_lock->mutex);
	if (freeit)
		kfree(fstripe_lock);
out:
	btrfs_put_block_group(bg_cache);
	return ret;
}
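
/*
 * Minimal usage sketch for the pair above (hypothetical caller; the same
 * pattern is used by scrub_handle_errored_block() below):
 *
 *	bool locked;
 *	int ret;
 *
 *	ret = lock_full_stripe(fs_info, logical, &locked);
 *	if (ret < 0)
 *		return ret;
 *	(recheck/repair the block covering @logical)
 *	ret = unlock_full_stripe(fs_info, logical, locked);
 *
 * For non-RAID56 block groups, lock_full_stripe() leaves *locked false and
 * unlock_full_stripe() turns into a no-op, so callers need no profile
 * checks of their own.
 */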

static void scrub_free_csums(struct scrub_ctx *sctx)
{
	while (!list_empty(&sctx->csum_list)) {
		struct btrfs_ordered_sum *sum;
		sum = list_first_entry(&sctx->csum_list,
				       struct btrfs_ordered_sum, list);
		list_del(&sum->list);
		kfree(sum);
	}
}

static noinline_for_stack void scrub_free_ctx(struct scrub_ctx *sctx)
{
	int i;

	if (!sctx)
		return;

	/* this can happen when scrub is cancelled */
	if (sctx->curr != -1) {
		struct scrub_bio *sbio = sctx->bios[sctx->curr];

		for (i = 0; i < sbio->page_count; i++) {
			WARN_ON(!sbio->pagev[i]->page);
			scrub_block_put(sbio->pagev[i]->sblock);
		}
		bio_put(sbio->bio);
	}

	for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) {
		struct scrub_bio *sbio = sctx->bios[i];

		if (!sbio)
			break;
		kfree(sbio);
	}

	kfree(sctx->wr_curr_bio);
	scrub_free_csums(sctx);
	kfree(sctx);
}

static void scrub_put_ctx(struct scrub_ctx *sctx)
{
	if (refcount_dec_and_test(&sctx->refs))
		scrub_free_ctx(sctx);
}

static noinline_for_stack struct scrub_ctx *scrub_setup_ctx(
		struct btrfs_fs_info *fs_info, int is_dev_replace)
{
	struct scrub_ctx *sctx;
	int i;

	sctx = kzalloc(sizeof(*sctx), GFP_KERNEL);
	if (!sctx)
		goto nomem;
	refcount_set(&sctx->refs, 1);
	sctx->is_dev_replace = is_dev_replace;
	sctx->pages_per_rd_bio = SCRUB_PAGES_PER_RD_BIO;
	sctx->curr = -1;
	sctx->fs_info = fs_info;
	INIT_LIST_HEAD(&sctx->csum_list);
	for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) {
		struct scrub_bio *sbio;

		sbio = kzalloc(sizeof(*sbio), GFP_KERNEL);
		if (!sbio)
			goto nomem;
		sctx->bios[i] = sbio;

		sbio->index = i;
		sbio->sctx = sctx;
		sbio->page_count = 0;
		btrfs_init_work(&sbio->work, btrfs_scrub_helper,
				scrub_bio_end_io_worker, NULL, NULL);

		if (i != SCRUB_BIOS_PER_SCTX - 1)
			sctx->bios[i]->next_free = i + 1;
		else
			sctx->bios[i]->next_free = -1;
	}
	sctx->first_free = 0;
	atomic_set(&sctx->bios_in_flight, 0);
	atomic_set(&sctx->workers_pending, 0);
	atomic_set(&sctx->cancel_req, 0);
	sctx->csum_size = btrfs_super_csum_size(fs_info->super_copy);

	spin_lock_init(&sctx->list_lock);
	spin_lock_init(&sctx->stat_lock);
	init_waitqueue_head(&sctx->list_wait);

	WARN_ON(sctx->wr_curr_bio != NULL);
	mutex_init(&sctx->wr_lock);
	sctx->wr_curr_bio = NULL;
	if (is_dev_replace) {
		WARN_ON(!fs_info->dev_replace.tgtdev);
		sctx->pages_per_wr_bio = SCRUB_PAGES_PER_WR_BIO;
		sctx->wr_tgtdev = fs_info->dev_replace.tgtdev;
		sctx->flush_all_writes = false;
	}

	return sctx;

nomem:
	scrub_free_ctx(sctx);
	return ERR_PTR(-ENOMEM);
}
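
/*
 * The loop above threads the bios into a singly linked free list through
 * their next_free indices; walked through for SCRUB_BIOS_PER_SCTX == 64:
 * bios[0].next_free == 1, bios[1].next_free == 2, ..., bios[63].next_free
 * == -1, with sctx->first_free == 0 pointing at the head.
 */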

static int scrub_print_warning_inode(u64 inum, u64 offset, u64 root,
				     void *warn_ctx)
{
	u64 isize;
	u32 nlink;
	int ret;
	int i;
	unsigned nofs_flag;
	struct extent_buffer *eb;
	struct btrfs_inode_item *inode_item;
	struct scrub_warning *swarn = warn_ctx;
	struct btrfs_fs_info *fs_info = swarn->dev->fs_info;
	struct inode_fs_paths *ipath = NULL;
	struct btrfs_root *local_root;
	struct btrfs_key root_key;
	struct btrfs_key key;

	root_key.objectid = root;
	root_key.type = BTRFS_ROOT_ITEM_KEY;
	root_key.offset = (u64)-1;
	local_root = btrfs_read_fs_root_no_name(fs_info, &root_key);
	if (IS_ERR(local_root)) {
		ret = PTR_ERR(local_root);
		goto err;
	}

	/*
	 * this makes the path point to (inum INODE_ITEM ioff)
	 */
	key.objectid = inum;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;

	ret = btrfs_search_slot(NULL, local_root, &key, swarn->path, 0, 0);
	if (ret) {
		btrfs_release_path(swarn->path);
		goto err;
	}

	eb = swarn->path->nodes[0];
	inode_item = btrfs_item_ptr(eb, swarn->path->slots[0],
				    struct btrfs_inode_item);
	isize = btrfs_inode_size(eb, inode_item);
	nlink = btrfs_inode_nlink(eb, inode_item);
	btrfs_release_path(swarn->path);

	/*
	 * init_path might indirectly call vmalloc, or use GFP_KERNEL. Scrub
	 * uses GFP_NOFS in this context, so we keep it consistent but it does
	 * not seem to be strictly necessary.
	 */
	nofs_flag = memalloc_nofs_save();
	ipath = init_ipath(4096, local_root, swarn->path);
	memalloc_nofs_restore(nofs_flag);
	if (IS_ERR(ipath)) {
		ret = PTR_ERR(ipath);
		ipath = NULL;
		goto err;
	}
	ret = paths_from_inode(inum, ipath);

	if (ret < 0)
		goto err;

	/*
	 * we deliberately ignore the fact that ipath might have been too
	 * small to hold all of the paths here
	 */
	for (i = 0; i < ipath->fspath->elem_cnt; ++i)
		btrfs_warn_in_rcu(fs_info,
"%s at logical %llu on dev %s, physical %llu, root %llu, inode %llu, offset %llu, length %llu, links %u (path: %s)",
				  swarn->errstr, swarn->logical,
				  rcu_str_deref(swarn->dev->name),
				  swarn->physical,
				  root, inum, offset,
				  min(isize - offset, (u64)PAGE_SIZE), nlink,
				  (char *)(unsigned long)ipath->fspath->val[i]);

	free_ipath(ipath);
	return 0;

err:
	btrfs_warn_in_rcu(fs_info,
			  "%s at logical %llu on dev %s, physical %llu, root %llu, inode %llu, offset %llu: path resolving failed with ret=%d",
			  swarn->errstr, swarn->logical,
			  rcu_str_deref(swarn->dev->name),
			  swarn->physical,
			  root, inum, offset, ret);

	free_ipath(ipath);
	return 0;
}

static void scrub_print_warning(const char *errstr, struct scrub_block *sblock)
{
	struct btrfs_device *dev;
	struct btrfs_fs_info *fs_info;
	struct btrfs_path *path;
	struct btrfs_key found_key;
	struct extent_buffer *eb;
	struct btrfs_extent_item *ei;
	struct scrub_warning swarn;
	unsigned long ptr = 0;
	u64 extent_item_pos;
	u64 flags = 0;
	u64 ref_root;
	u32 item_size;
	u8 ref_level = 0;
	int ret;

	WARN_ON(sblock->page_count < 1);
	dev = sblock->pagev[0]->dev;
	fs_info = sblock->sctx->fs_info;

	path = btrfs_alloc_path();
	if (!path)
		return;

	swarn.physical = sblock->pagev[0]->physical;
	swarn.logical = sblock->pagev[0]->logical;
	swarn.errstr = errstr;
	swarn.dev = NULL;

	ret = extent_from_logical(fs_info, swarn.logical, path, &found_key,
				  &flags);
	if (ret < 0)
		goto out;

	extent_item_pos = swarn.logical - found_key.objectid;
	swarn.extent_item_size = found_key.offset;

	eb = path->nodes[0];
	ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
	item_size = btrfs_item_size_nr(eb, path->slots[0]);

	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		do {
			ret = tree_backref_for_extent(&ptr, eb, &found_key, ei,
						      item_size, &ref_root,
						      &ref_level);
			btrfs_warn_in_rcu(fs_info,
"%s at logical %llu on dev %s, physical %llu: metadata %s (level %d) in tree %llu",
				errstr, swarn.logical,
				rcu_str_deref(dev->name),
				swarn.physical,
				ref_level ? "node" : "leaf",
				ret < 0 ? -1 : ref_level,
				ret < 0 ? -1 : ref_root);
		} while (ret != 1);
		btrfs_release_path(path);
	} else {
		btrfs_release_path(path);
		swarn.path = path;
		swarn.dev = dev;
		iterate_extent_inodes(fs_info, found_key.objectid,
				      extent_item_pos, 1,
				      scrub_print_warning_inode, &swarn, false);
	}

out:
	btrfs_free_path(path);
}

static inline void scrub_get_recover(struct scrub_recover *recover)
{
	refcount_inc(&recover->refs);
}

static inline void scrub_put_recover(struct btrfs_fs_info *fs_info,
				     struct scrub_recover *recover)
{
	if (refcount_dec_and_test(&recover->refs)) {
		btrfs_bio_counter_dec(fs_info);
		btrfs_put_bbio(recover->bbio);
		kfree(recover);
	}
}

/*
 * scrub_handle_errored_block gets called when either verification of the
 * pages failed or the bio failed to read, e.g. with EIO. In the latter
 * case, this function handles all pages in the bio, even though only one
 * may be bad.
 * The goal of this function is to repair the errored block by using the
 * contents of one of the mirrors.
 */
static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
{
	struct scrub_ctx *sctx = sblock_to_check->sctx;
	struct btrfs_device *dev;
	struct btrfs_fs_info *fs_info;
	u64 logical;
	unsigned int failed_mirror_index;
	unsigned int is_metadata;
	unsigned int have_csum;
	struct scrub_block *sblocks_for_recheck; /* holds one for each mirror */
	struct scrub_block *sblock_bad;
	int ret;
	int mirror_index;
	int page_num;
	int success;
	bool full_stripe_locked;
	unsigned int nofs_flag;
	static DEFINE_RATELIMIT_STATE(_rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);

	BUG_ON(sblock_to_check->page_count < 1);
	fs_info = sctx->fs_info;
	if (sblock_to_check->pagev[0]->flags & BTRFS_EXTENT_FLAG_SUPER) {
		/*
		 * if we find an error in a super block, we just report it.
		 * They will get written with the next transaction commit
		 * anyway
		 */
		spin_lock(&sctx->stat_lock);
		++sctx->stat.super_errors;
		spin_unlock(&sctx->stat_lock);
		return 0;
	}
	logical = sblock_to_check->pagev[0]->logical;
	BUG_ON(sblock_to_check->pagev[0]->mirror_num < 1);
	failed_mirror_index = sblock_to_check->pagev[0]->mirror_num - 1;
	is_metadata = !(sblock_to_check->pagev[0]->flags &
			BTRFS_EXTENT_FLAG_DATA);
	have_csum = sblock_to_check->pagev[0]->have_csum;
	dev = sblock_to_check->pagev[0]->dev;

	/*
	 * We must use GFP_NOFS because the scrub task might be waiting for a
	 * worker task executing this function and in turn a transaction commit
	 * might be waiting for the scrub task to pause (which needs to wait
	 * for all the worker tasks to complete before pausing).
	 * We do allocations in the workers through insert_full_stripe_lock()
	 * and scrub_add_page_to_wr_bio(), which happens down the call chain of
	 * this function.
	 */
	nofs_flag = memalloc_nofs_save();
	/*
	 * For RAID5/6, a race can happen with a scrub thread for a different
	 * device. On data corruption, the parity and data threads will both
	 * try to recover the data.
	 * The race can lead to a doubly added csum error, or even an
	 * unrecoverable error.
	 */
	ret = lock_full_stripe(fs_info, logical, &full_stripe_locked);
	if (ret < 0) {
		memalloc_nofs_restore(nofs_flag);
		spin_lock(&sctx->stat_lock);
		if (ret == -ENOMEM)
			sctx->stat.malloc_errors++;
		sctx->stat.read_errors++;
		sctx->stat.uncorrectable_errors++;
		spin_unlock(&sctx->stat_lock);
		return ret;
	}

	/*
	 * read all mirrors one after the other. This includes re-reading
	 * the extent or metadata block that failed (which is the reason
	 * this fixup code is called), but this time page by page, in order
	 * to know which pages caused I/O errors and which ones are good
	 * (for all mirrors).
	 * It is the goal to handle the situation when more than one
	 * mirror contains I/O errors, but the errors do not
	 * overlap, i.e. the data can be repaired by selecting the
	 * pages from those mirrors without I/O error on the
	 * particular pages. One example (with blocks >= 2 * PAGE_SIZE)
	 * would be that mirror #1 has an I/O error on the first page,
	 * the second page is good, and mirror #2 has an I/O error on
	 * the second page, but the first page is good.
	 * Then the first page of the first mirror can be repaired by
	 * taking the first page of the second mirror, and the
	 * second page of the second mirror can be repaired by
	 * copying the contents of the 2nd page of the 1st mirror.
	 * One more note: if the pages of one mirror contain I/O
	 * errors, the checksum cannot be verified. In order to get
	 * the best data for repairing, the first attempt is to find
	 * a mirror without I/O errors and with a validated checksum.
	 * Only if this is not possible are the pages picked from
	 * mirrors with I/O errors, without considering the checksum.
	 * If the latter is the case, at the end the checksum of the
	 * repaired area is verified in order to correctly maintain
	 * the statistics.
	 */

	sblocks_for_recheck = kcalloc(BTRFS_MAX_MIRRORS,
				      sizeof(*sblocks_for_recheck), GFP_KERNEL);
	if (!sblocks_for_recheck) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.malloc_errors++;
		sctx->stat.read_errors++;
		sctx->stat.uncorrectable_errors++;
		spin_unlock(&sctx->stat_lock);
		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
		goto out;
	}

	/* setup the context, map the logical blocks and alloc the pages */
	ret = scrub_setup_recheck_block(sblock_to_check, sblocks_for_recheck);
	if (ret) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.read_errors++;
		sctx->stat.uncorrectable_errors++;
		spin_unlock(&sctx->stat_lock);
		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
		goto out;
	}
	BUG_ON(failed_mirror_index >= BTRFS_MAX_MIRRORS);
	sblock_bad = sblocks_for_recheck + failed_mirror_index;

	/* build and submit the bios for the failed mirror, check checksums */
	scrub_recheck_block(fs_info, sblock_bad, 1);

	if (!sblock_bad->header_error && !sblock_bad->checksum_error &&
	    sblock_bad->no_io_error_seen) {
		/*
		 * the error disappeared after reading page by page, or
		 * the area was part of a huge bio and other parts of the
		 * bio caused I/O errors, or the block layer merged several
		 * read requests into one and the error is caused by a
		 * different bio (usually one of the two latter cases is
		 * the cause)
		 */
		spin_lock(&sctx->stat_lock);
		sctx->stat.unverified_errors++;
		sblock_to_check->data_corrected = 1;
		spin_unlock(&sctx->stat_lock);

		if (sctx->is_dev_replace)
			scrub_write_block_to_dev_replace(sblock_bad);
		goto out;
	}

	if (!sblock_bad->no_io_error_seen) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.read_errors++;
		spin_unlock(&sctx->stat_lock);
		if (__ratelimit(&_rs))
			scrub_print_warning("i/o error", sblock_to_check);
		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
	} else if (sblock_bad->checksum_error) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.csum_errors++;
		spin_unlock(&sctx->stat_lock);
		if (__ratelimit(&_rs))
			scrub_print_warning("checksum error", sblock_to_check);
		btrfs_dev_stat_inc_and_print(dev,
					     BTRFS_DEV_STAT_CORRUPTION_ERRS);
	} else if (sblock_bad->header_error) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.verify_errors++;
		spin_unlock(&sctx->stat_lock);
		if (__ratelimit(&_rs))
			scrub_print_warning("checksum/header error",
					    sblock_to_check);
		if (sblock_bad->generation_error)
			btrfs_dev_stat_inc_and_print(dev,
				BTRFS_DEV_STAT_GENERATION_ERRS);
		else
			btrfs_dev_stat_inc_and_print(dev,
				BTRFS_DEV_STAT_CORRUPTION_ERRS);
	}

	if (sctx->readonly) {
		ASSERT(!sctx->is_dev_replace);
		goto out;
	}

	/*
	 * now build and submit the bios for the other mirrors, check
	 * checksums.
	 * First try to pick the mirror which is completely without I/O
	 * errors and also does not have a checksum error.
	 * If one is found, and if a checksum is present, the full block
	 * that is known to contain an error is rewritten. Afterwards
	 * the block is known to be corrected.
	 * If a mirror is found which is completely correct, and no
	 * checksum is present, only those pages are rewritten that had
	 * an I/O error in the block to be repaired, since it cannot be
	 * determined which copy of the other pages is better (and it
	 * could otherwise happen that a correct page would be
	 * overwritten by a bad one).
	 */
	for (mirror_index = 0; ; mirror_index++) {
		struct scrub_block *sblock_other;

		if (mirror_index == failed_mirror_index)
			continue;

		/* raid56's mirror can be more than BTRFS_MAX_MIRRORS */
		if (!scrub_is_page_on_raid56(sblock_bad->pagev[0])) {
			if (mirror_index >= BTRFS_MAX_MIRRORS)
				break;
			if (!sblocks_for_recheck[mirror_index].page_count)
				break;

			sblock_other = sblocks_for_recheck + mirror_index;
		} else {
			struct scrub_recover *r = sblock_bad->pagev[0]->recover;
			int max_allowed = r->bbio->num_stripes -
						r->bbio->num_tgtdevs;

			if (mirror_index >= max_allowed)
				break;
			if (!sblocks_for_recheck[1].page_count)
				break;

			ASSERT(failed_mirror_index == 0);
			sblock_other = sblocks_for_recheck + 1;
			sblock_other->pagev[0]->mirror_num = 1 + mirror_index;
		}

		/* build and submit the bios, check checksums */
		scrub_recheck_block(fs_info, sblock_other, 0);

		if (!sblock_other->header_error &&
		    !sblock_other->checksum_error &&
		    sblock_other->no_io_error_seen) {
			if (sctx->is_dev_replace) {
				scrub_write_block_to_dev_replace(sblock_other);
				goto corrected_error;
			} else {
				ret = scrub_repair_block_from_good_copy(
						sblock_bad, sblock_other);
				if (!ret)
					goto corrected_error;
			}
		}
	}

	if (sblock_bad->no_io_error_seen && !sctx->is_dev_replace)
		goto did_not_correct_error;

	/*
	 * In case of I/O errors in the area that is supposed to be
	 * repaired, continue by picking good copies of those pages.
	 * Select the good pages from mirrors to rewrite bad pages from
	 * the area to fix. Afterwards verify the checksum of the block
	 * that is supposed to be repaired. This verification step is
	 * only done for the purpose of statistics counting and for the
	 * final scrub report, whether errors remain.
	 * A perfect algorithm could make use of the checksum and try
	 * all possible combinations of pages from the different mirrors
	 * until the checksum verification succeeds. For example, when
	 * the 2nd page of mirror #1 faces I/O errors, and the 2nd page
	 * of mirror #2 is readable but the final checksum test fails,
	 * then the 2nd page of mirror #3 could be tried, to see whether
	 * the final checksum then succeeds. But this would be a rare
	 * exception and is therefore not implemented. At least it is
	 * avoided that the good copy is overwritten.
	 * A more useful improvement would be to pick the sectors
	 * without I/O error based on sector sizes (512 bytes on legacy
	 * disks) instead of on PAGE_SIZE. Then maybe 512 bytes of one
	 * mirror could be repaired by taking 512 bytes of a different
	 * mirror, even if other 512 byte sectors in the same PAGE_SIZE
	 * area are unreadable.
	 */
	success = 1;
	for (page_num = 0; page_num < sblock_bad->page_count;
	     page_num++) {
		struct scrub_page *page_bad = sblock_bad->pagev[page_num];
		struct scrub_block *sblock_other = NULL;

		/* skip no-io-error page in scrub */
		if (!page_bad->io_error && !sctx->is_dev_replace)
			continue;

		if (scrub_is_page_on_raid56(sblock_bad->pagev[0])) {
			/*
			 * In case of dev replace, if raid56 rebuild process
			 * didn't work out correct data, then copy the content
			 * in sblock_bad to make sure target device is identical
			 * to source device, instead of writing garbage data in
			 * sblock_for_recheck array to target device.
			 */
			sblock_other = NULL;
		} else if (page_bad->io_error) {
			/* try to find no-io-error page in mirrors */
			for (mirror_index = 0;
			     mirror_index < BTRFS_MAX_MIRRORS &&
			     sblocks_for_recheck[mirror_index].page_count > 0;
			     mirror_index++) {
				if (!sblocks_for_recheck[mirror_index].
				    pagev[page_num]->io_error) {
					sblock_other = sblocks_for_recheck +
						       mirror_index;
					break;
				}
			}
			if (!sblock_other)
				success = 0;
		}

		if (sctx->is_dev_replace) {
			/*
			 * did not find a mirror to fetch the page
			 * from. scrub_write_page_to_dev_replace()
			 * handles this case (page->io_error), by
			 * filling the block with zeros before
			 * submitting the write request
			 */
			if (!sblock_other)
				sblock_other = sblock_bad;

			if (scrub_write_page_to_dev_replace(sblock_other,
							    page_num) != 0) {
				atomic64_inc(
					&fs_info->dev_replace.num_write_errors);
				success = 0;
			}
		} else if (sblock_other) {
			ret = scrub_repair_page_from_good_copy(sblock_bad,
							       sblock_other,
							       page_num, 0);
			if (ret == 0)
				page_bad->io_error = 0;
			else
				success = 0;
		}
	}

	if (success && !sctx->is_dev_replace) {
		if (is_metadata || have_csum) {
			/*
			 * need to verify the checksum now that all
			 * sectors on disk are repaired (the write
			 * request for data to be repaired is on its way).
			 * Just be lazy and use scrub_recheck_block()
			 * which re-reads the data before the checksum
			 * is verified, but most likely the data comes out
			 * of the page cache.
			 */
			scrub_recheck_block(fs_info, sblock_bad, 1);
			if (!sblock_bad->header_error &&
			    !sblock_bad->checksum_error &&
			    sblock_bad->no_io_error_seen)
				goto corrected_error;
			else
				goto did_not_correct_error;
		} else {
corrected_error:
			spin_lock(&sctx->stat_lock);
			sctx->stat.corrected_errors++;
			sblock_to_check->data_corrected = 1;
			spin_unlock(&sctx->stat_lock);
			btrfs_err_rl_in_rcu(fs_info,
				"fixed up error at logical %llu on dev %s",
				logical, rcu_str_deref(dev->name));
		}
	} else {
did_not_correct_error:
		spin_lock(&sctx->stat_lock);
		sctx->stat.uncorrectable_errors++;
		spin_unlock(&sctx->stat_lock);
		btrfs_err_rl_in_rcu(fs_info,
			"unable to fixup (regular) error at logical %llu on dev %s",
			logical, rcu_str_deref(dev->name));
	}

out:
	if (sblocks_for_recheck) {
		for (mirror_index = 0; mirror_index < BTRFS_MAX_MIRRORS;
		     mirror_index++) {
			struct scrub_block *sblock = sblocks_for_recheck +
						     mirror_index;
			struct scrub_recover *recover;
			int page_index;

			for (page_index = 0; page_index < sblock->page_count;
			     page_index++) {
				sblock->pagev[page_index]->sblock = NULL;
				recover = sblock->pagev[page_index]->recover;
				if (recover) {
					scrub_put_recover(fs_info, recover);
					sblock->pagev[page_index]->recover =
									NULL;
				}
				scrub_page_put(sblock->pagev[page_index]);
			}
		}
		kfree(sblocks_for_recheck);
	}

	ret = unlock_full_stripe(fs_info, logical, full_stripe_locked);
	memalloc_nofs_restore(nofs_flag);
	if (ret < 0)
		return ret;
	return 0;
}

Zhao Lei8e5cfb52015-01-20 15:11:33 +08001224static inline int scrub_nr_raid_mirrors(struct btrfs_bio *bbio)
Miao Xieaf8e2d12014-10-23 14:42:50 +08001225{
Zhao Lei10f11902015-01-20 15:11:43 +08001226 if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID5)
1227 return 2;
1228 else if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID6)
1229 return 3;
1230 else
Miao Xieaf8e2d12014-10-23 14:42:50 +08001231 return (int)bbio->num_stripes;
Miao Xieaf8e2d12014-10-23 14:42:50 +08001232}
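/*
 * Illustrative note (not part of the original source): for RAID5 a
 * block can be produced in two ways - read directly, or rebuilt from
 * the remaining data stripes plus the P parity - hence 2 "mirrors";
 * RAID6 adds the Q parity for a third reconstruction path. For all
 * other profiles every stripe is a literal copy, so num_stripes is
 * the mirror count.
 */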
1233
Zhao Lei10f11902015-01-20 15:11:43 +08001234static inline void scrub_stripe_index_and_offset(u64 logical, u64 map_type,
1235 u64 *raid_map,
Miao Xieaf8e2d12014-10-23 14:42:50 +08001236 u64 mapped_length,
1237 int nstripes, int mirror,
1238 int *stripe_index,
1239 u64 *stripe_offset)
1240{
1241 int i;
1242
Zhao Leiffe2d202015-01-20 15:11:44 +08001243 if (map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
Miao Xieaf8e2d12014-10-23 14:42:50 +08001244 /* RAID5/6 */
1245 for (i = 0; i < nstripes; i++) {
1246 if (raid_map[i] == RAID6_Q_STRIPE ||
1247 raid_map[i] == RAID5_P_STRIPE)
1248 continue;
1249
1250 if (logical >= raid_map[i] &&
1251 logical < raid_map[i] + mapped_length)
1252 break;
1253 }
1254
1255 *stripe_index = i;
1256 *stripe_offset = logical - raid_map[i];
1257 } else {
1258 /* The other RAID type */
1259 *stripe_index = mirror;
1260 *stripe_offset = 0;
1261 }
1262}
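/*
 * Worked example (illustrative, values assumed): for a RAID5 layout
 * with raid_map = { 0, 65536, RAID5_P_STRIPE } and a mapped_length of
 * 65536, logical = 69632 satisfies raid_map[1] <= logical <
 * raid_map[1] + mapped_length, so *stripe_index = 1 and
 * *stripe_offset = 69632 - 65536 = 4096. For mirrored profiles the
 * mirror number itself indexes the stripe and the offset is 0.
 */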
1263
Zhao Leibe50a8d2015-01-20 15:11:42 +08001264static int scrub_setup_recheck_block(struct scrub_block *original_sblock,
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001265 struct scrub_block *sblocks_for_recheck)
Arne Jansena2de7332011-03-08 14:14:00 +01001266{
Zhao Leibe50a8d2015-01-20 15:11:42 +08001267 struct scrub_ctx *sctx = original_sblock->sctx;
Jeff Mahoneyfb456252016-06-22 18:54:56 -04001268 struct btrfs_fs_info *fs_info = sctx->fs_info;
Zhao Leibe50a8d2015-01-20 15:11:42 +08001269 u64 length = original_sblock->page_count * PAGE_SIZE;
1270 u64 logical = original_sblock->pagev[0]->logical;
Zhao Lei4734b7e2015-08-19 22:39:18 +08001271 u64 generation = original_sblock->pagev[0]->generation;
1272 u64 flags = original_sblock->pagev[0]->flags;
1273 u64 have_csum = original_sblock->pagev[0]->have_csum;
Miao Xieaf8e2d12014-10-23 14:42:50 +08001274 struct scrub_recover *recover;
1275 struct btrfs_bio *bbio;
Miao Xieaf8e2d12014-10-23 14:42:50 +08001276 u64 sublen;
1277 u64 mapped_length;
1278 u64 stripe_offset;
1279 int stripe_index;
Zhao Leibe50a8d2015-01-20 15:11:42 +08001280 int page_index = 0;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001281 int mirror_index;
Miao Xieaf8e2d12014-10-23 14:42:50 +08001282 int nmirrors;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001283 int ret;
1284
1285 /*
Zhao Lei57019342015-01-20 15:11:45 +08001286 * note: the two members refs and outstanding_pages
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001287 * are not used (and not set) in the blocks that are used for
1288 * the recheck procedure
1289 */
1290
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001291 while (length > 0) {
Miao Xieaf8e2d12014-10-23 14:42:50 +08001292 sublen = min_t(u64, length, PAGE_SIZE);
1293 mapped_length = sublen;
1294 bbio = NULL;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001295
1296 /*
1297 * with a length of PAGE_SIZE, each returned stripe
1298 * represents one mirror
1299 */
Qu Wenruoe501bfe2017-03-29 09:33:22 +08001300 btrfs_bio_counter_inc_blocked(fs_info);
Christoph Hellwigcf8cddd2016-10-27 09:27:36 +02001301 ret = btrfs_map_sblock(fs_info, BTRFS_MAP_GET_READ_MIRRORS,
David Sterba825ad4c2017-03-28 14:45:22 +02001302 logical, &mapped_length, &bbio);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001303 if (ret || !bbio || mapped_length < sublen) {
Zhao Lei6e9606d2015-01-20 15:11:34 +08001304 btrfs_put_bbio(bbio);
Qu Wenruoe501bfe2017-03-29 09:33:22 +08001305 btrfs_bio_counter_dec(fs_info);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001306 return -EIO;
1307 }
1308
Miao Xieaf8e2d12014-10-23 14:42:50 +08001309 recover = kzalloc(sizeof(struct scrub_recover), GFP_NOFS);
1310 if (!recover) {
Zhao Lei6e9606d2015-01-20 15:11:34 +08001311 btrfs_put_bbio(bbio);
Qu Wenruoe501bfe2017-03-29 09:33:22 +08001312 btrfs_bio_counter_dec(fs_info);
Miao Xieaf8e2d12014-10-23 14:42:50 +08001313 return -ENOMEM;
1314 }
1315
Elena Reshetova6f615012017-03-03 10:55:21 +02001316 refcount_set(&recover->refs, 1);
Miao Xieaf8e2d12014-10-23 14:42:50 +08001317 recover->bbio = bbio;
Miao Xieaf8e2d12014-10-23 14:42:50 +08001318 recover->map_length = mapped_length;
1319
Ashish Samant24731142016-04-29 18:33:59 -07001320 BUG_ON(page_index >= SCRUB_MAX_PAGES_PER_BLOCK);
Miao Xieaf8e2d12014-10-23 14:42:50 +08001321
Zhao Leibe50a8d2015-01-20 15:11:42 +08001322 nmirrors = min(scrub_nr_raid_mirrors(bbio), BTRFS_MAX_MIRRORS);
Zhao Lei10f11902015-01-20 15:11:43 +08001323
Miao Xieaf8e2d12014-10-23 14:42:50 +08001324 for (mirror_index = 0; mirror_index < nmirrors;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001325 mirror_index++) {
1326 struct scrub_block *sblock;
1327 struct scrub_page *page;
1328
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001329 sblock = sblocks_for_recheck + mirror_index;
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001330 sblock->sctx = sctx;
Zhao Lei4734b7e2015-08-19 22:39:18 +08001331
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001332 page = kzalloc(sizeof(*page), GFP_NOFS);
1333 if (!page) {
1334leave_nomem:
Stefan Behrensd9d181c2012-11-02 09:58:09 +01001335 spin_lock(&sctx->stat_lock);
1336 sctx->stat.malloc_errors++;
1337 spin_unlock(&sctx->stat_lock);
Qu Wenruoe501bfe2017-03-29 09:33:22 +08001338 scrub_put_recover(fs_info, recover);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001339 return -ENOMEM;
1340 }
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001341 scrub_page_get(page);
1342 sblock->pagev[page_index] = page;
Zhao Lei4734b7e2015-08-19 22:39:18 +08001343 page->sblock = sblock;
1344 page->flags = flags;
1345 page->generation = generation;
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001346 page->logical = logical;
Zhao Lei4734b7e2015-08-19 22:39:18 +08001347 page->have_csum = have_csum;
1348 if (have_csum)
1349 memcpy(page->csum,
1350 original_sblock->pagev[0]->csum,
1351 sctx->csum_size);
Miao Xieaf8e2d12014-10-23 14:42:50 +08001352
Zhao Lei10f11902015-01-20 15:11:43 +08001353 scrub_stripe_index_and_offset(logical,
1354 bbio->map_type,
1355 bbio->raid_map,
Miao Xieaf8e2d12014-10-23 14:42:50 +08001356 mapped_length,
Zhao Leie34c3302015-01-20 15:11:31 +08001357 bbio->num_stripes -
1358 bbio->num_tgtdevs,
Miao Xieaf8e2d12014-10-23 14:42:50 +08001359 mirror_index,
1360 &stripe_index,
1361 &stripe_offset);
1362 page->physical = bbio->stripes[stripe_index].physical +
1363 stripe_offset;
1364 page->dev = bbio->stripes[stripe_index].dev;
1365
Stefan Behrensff023aa2012-11-06 11:43:11 +01001366 BUG_ON(page_index >= original_sblock->page_count);
1367 page->physical_for_dev_replace =
1368 original_sblock->pagev[page_index]->
1369 physical_for_dev_replace;
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001370 /* for missing devices, dev->bdev is NULL */
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001371 page->mirror_num = mirror_index + 1;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001372 sblock->page_count++;
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001373 page->page = alloc_page(GFP_NOFS);
1374 if (!page->page)
1375 goto leave_nomem;
Miao Xieaf8e2d12014-10-23 14:42:50 +08001376
1377 scrub_get_recover(recover);
1378 page->recover = recover;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001379 }
Qu Wenruoe501bfe2017-03-29 09:33:22 +08001380 scrub_put_recover(fs_info, recover);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001381 length -= sublen;
1382 logical += sublen;
1383 page_index++;
1384 }
1385
1386 return 0;
1387}
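/*
 * Illustrative summary (not part of the original source): after this
 * setup, sblocks_for_recheck[m].pagev[i] describes page i of the block
 * as stored on mirror m + 1, and all mirrors of a given page index
 * share one scrub_recover, so the block mapping is looked up only once
 * per page.
 */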
1388
Christoph Hellwig4246a0b2015-07-20 15:29:37 +02001389static void scrub_bio_wait_endio(struct bio *bio)
Miao Xieaf8e2d12014-10-23 14:42:50 +08001390{
Liu Bob4ff5ad2017-11-30 17:26:39 -07001391 complete(bio->bi_private);
Miao Xieaf8e2d12014-10-23 14:42:50 +08001392}
1393
Miao Xieaf8e2d12014-10-23 14:42:50 +08001394static int scrub_submit_raid56_bio_wait(struct btrfs_fs_info *fs_info,
1395 struct bio *bio,
1396 struct scrub_page *page)
1397{
Liu Bob4ff5ad2017-11-30 17:26:39 -07001398 DECLARE_COMPLETION_ONSTACK(done);
Miao Xieaf8e2d12014-10-23 14:42:50 +08001399 int ret;
Liu Bo762221f2018-01-02 13:36:42 -07001400 int mirror_num;
Miao Xieaf8e2d12014-10-23 14:42:50 +08001401
Miao Xieaf8e2d12014-10-23 14:42:50 +08001402 bio->bi_iter.bi_sector = page->logical >> 9;
1403 bio->bi_private = &done;
1404 bio->bi_end_io = scrub_bio_wait_endio;
1405
Liu Bo762221f2018-01-02 13:36:42 -07001406 mirror_num = page->sblock->pagev[0]->mirror_num;
Jeff Mahoney2ff7e612016-06-22 18:54:24 -04001407 ret = raid56_parity_recover(fs_info, bio, page->recover->bbio,
Miao Xieaf8e2d12014-10-23 14:42:50 +08001408 page->recover->map_length,
Liu Bo762221f2018-01-02 13:36:42 -07001409 mirror_num, 0);
Miao Xieaf8e2d12014-10-23 14:42:50 +08001410 if (ret)
1411 return ret;
1412
Liu Bob4ff5ad2017-11-30 17:26:39 -07001413 wait_for_completion_io(&done);
1414 return blk_status_to_errno(bio->bi_status);
Miao Xieaf8e2d12014-10-23 14:42:50 +08001415}
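/*
 * Illustrative note: this is the usual synchronous-over-asynchronous
 * pattern - an on-stack completion is passed as bi_private, the
 * end_io handler merely calls complete(), and the submitter sleeps in
 * wait_for_completion_io() until RAID56 recovery finishes, then maps
 * bi_status to a regular errno.
 */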
1416
Liu Bo6ca17652018-03-07 12:08:09 -07001417static void scrub_recheck_block_on_raid56(struct btrfs_fs_info *fs_info,
1418 struct scrub_block *sblock)
1419{
1420 struct scrub_page *first_page = sblock->pagev[0];
1421 struct bio *bio;
1422 int page_num;
1423
1424 /* All pages in sblock belong to the same stripe on the same device. */
1425 ASSERT(first_page->dev);
1426 if (!first_page->dev->bdev)
1427 goto out;
1428
1429 bio = btrfs_io_bio_alloc(BIO_MAX_PAGES);
1430 bio_set_dev(bio, first_page->dev->bdev);
1431
1432 for (page_num = 0; page_num < sblock->page_count; page_num++) {
1433 struct scrub_page *page = sblock->pagev[page_num];
1434
1435 WARN_ON(!page->page);
1436 bio_add_page(bio, page->page, PAGE_SIZE, 0);
1437 }
1438
1439 if (scrub_submit_raid56_bio_wait(fs_info, bio, first_page)) {
1440 bio_put(bio);
1441 goto out;
1442 }
1443
1444 bio_put(bio);
1445
1446 scrub_recheck_block_checksum(sblock);
1447
1448 return;
1449out:
1450 for (page_num = 0; page_num < sblock->page_count; page_num++)
1451 sblock->pagev[page_num]->io_error = 1;
1452
1453 sblock->no_io_error_seen = 0;
1454}
1455
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001456/*
1457 * This function checks the on-disk data for checksum errors, header
1458 * errors and read I/O errors. If any I/O error happens, the exact
1459 * pages that failed are marked as bad. The goal is to enable scrub to
1460 * take the non-failing pages from all mirrors, so that the failing
1461 * pages in the just-handled mirror can be repaired.
1462 */
Stefan Behrens34f5c8e2012-11-02 16:16:26 +01001463static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
Zhao Leiaffe4a52015-08-24 21:32:06 +08001464 struct scrub_block *sblock,
1465 int retry_failed_mirror)
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001466{
1467 int page_num;
1468
1469 sblock->no_io_error_seen = 1;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001470
Liu Bo6ca17652018-03-07 12:08:09 -07001471	/* Shortcut for RAID56 */
1472 if (!retry_failed_mirror && scrub_is_page_on_raid56(sblock->pagev[0]))
1473 return scrub_recheck_block_on_raid56(fs_info, sblock);
1474
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001475 for (page_num = 0; page_num < sblock->page_count; page_num++) {
1476 struct bio *bio;
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001477 struct scrub_page *page = sblock->pagev[page_num];
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001478
Stefan Behrens442a4f62012-05-25 16:06:08 +02001479 if (page->dev->bdev == NULL) {
Stefan Behrensea9947b2012-05-04 15:16:07 -04001480 page->io_error = 1;
1481 sblock->no_io_error_seen = 0;
1482 continue;
1483 }
1484
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001485 WARN_ON(!page->page);
David Sterbac5e4c3d2017-06-12 17:29:41 +02001486 bio = btrfs_io_bio_alloc(1);
Christoph Hellwig74d46992017-08-23 19:10:32 +02001487 bio_set_dev(bio, page->dev->bdev);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001488
Stefan Behrens34f5c8e2012-11-02 16:16:26 +01001489 bio_add_page(bio, page->page, PAGE_SIZE, 0);
Liu Bo6ca17652018-03-07 12:08:09 -07001490 bio->bi_iter.bi_sector = page->physical >> 9;
1491 bio->bi_opf = REQ_OP_READ;
Miao Xieaf8e2d12014-10-23 14:42:50 +08001492
Liu Bo6ca17652018-03-07 12:08:09 -07001493 if (btrfsic_submit_bio_wait(bio)) {
1494 page->io_error = 1;
1495 sblock->no_io_error_seen = 0;
Miao Xieaf8e2d12014-10-23 14:42:50 +08001496 }
Kent Overstreet33879d42013-11-23 22:33:32 -08001497
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001498 bio_put(bio);
1499 }
1500
1501 if (sblock->no_io_error_seen)
Zhao Leiba7cf982015-08-24 21:18:02 +08001502 scrub_recheck_block_checksum(sblock);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001503}
1504
Miao Xie17a9be22014-07-24 11:37:08 +08001505static inline int scrub_check_fsid(u8 fsid[],
1506 struct scrub_page *spage)
1507{
1508 struct btrfs_fs_devices *fs_devices = spage->dev->fs_devices;
1509 int ret;
1510
Anand Jain44880fd2017-07-29 17:50:09 +08001511 ret = memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
Miao Xie17a9be22014-07-24 11:37:08 +08001512 return !ret;
1513}
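/*
 * Illustrative note: memcmp() returns 0 on a match, so the negated
 * result is a boolean "fsid matches this page's fs_devices".
 */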
1514
Zhao Leiba7cf982015-08-24 21:18:02 +08001515static void scrub_recheck_block_checksum(struct scrub_block *sblock)
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001516{
Zhao Leiba7cf982015-08-24 21:18:02 +08001517 sblock->header_error = 0;
1518 sblock->checksum_error = 0;
1519 sblock->generation_error = 0;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001520
Zhao Leiba7cf982015-08-24 21:18:02 +08001521 if (sblock->pagev[0]->flags & BTRFS_EXTENT_FLAG_DATA)
1522 scrub_checksum_data(sblock);
1523 else
1524 scrub_checksum_tree_block(sblock);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001525}
1526
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001527static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
Zhao Lei114ab502015-01-20 15:11:36 +08001528 struct scrub_block *sblock_good)
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001529{
1530 int page_num;
1531 int ret = 0;
1532
1533 for (page_num = 0; page_num < sblock_bad->page_count; page_num++) {
1534 int ret_sub;
1535
1536 ret_sub = scrub_repair_page_from_good_copy(sblock_bad,
1537 sblock_good,
Zhao Lei114ab502015-01-20 15:11:36 +08001538 page_num, 1);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001539 if (ret_sub)
1540 ret = ret_sub;
1541 }
1542
1543 return ret;
1544}
1545
1546static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
1547 struct scrub_block *sblock_good,
1548 int page_num, int force_write)
1549{
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001550 struct scrub_page *page_bad = sblock_bad->pagev[page_num];
1551 struct scrub_page *page_good = sblock_good->pagev[page_num];
Jeff Mahoney0b246af2016-06-22 18:54:23 -04001552 struct btrfs_fs_info *fs_info = sblock_bad->sctx->fs_info;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001553
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001554 BUG_ON(page_bad->page == NULL);
1555 BUG_ON(page_good->page == NULL);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001556 if (force_write || sblock_bad->header_error ||
1557 sblock_bad->checksum_error || page_bad->io_error) {
1558 struct bio *bio;
1559 int ret;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001560
Stefan Behrensff023aa2012-11-06 11:43:11 +01001561 if (!page_bad->dev->bdev) {
Jeff Mahoney0b246af2016-06-22 18:54:23 -04001562 btrfs_warn_rl(fs_info,
Jeff Mahoney5d163e02016-09-20 10:05:00 -04001563 "scrub_repair_page_from_good_copy(bdev == NULL) is unexpected");
Stefan Behrensff023aa2012-11-06 11:43:11 +01001564 return -EIO;
1565 }
1566
David Sterbac5e4c3d2017-06-12 17:29:41 +02001567 bio = btrfs_io_bio_alloc(1);
Christoph Hellwig74d46992017-08-23 19:10:32 +02001568 bio_set_dev(bio, page_bad->dev->bdev);
Kent Overstreet4f024f32013-10-11 15:44:27 -07001569 bio->bi_iter.bi_sector = page_bad->physical >> 9;
David Sterbaebcc3262018-06-29 10:56:53 +02001570 bio->bi_opf = REQ_OP_WRITE;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001571
1572 ret = bio_add_page(bio, page_good->page, PAGE_SIZE, 0);
1573 if (PAGE_SIZE != ret) {
1574 bio_put(bio);
1575 return -EIO;
1576 }
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001577
Mike Christie4e49ea42016-06-05 14:31:41 -05001578 if (btrfsic_submit_bio_wait(bio)) {
Stefan Behrens442a4f62012-05-25 16:06:08 +02001579 btrfs_dev_stat_inc_and_print(page_bad->dev,
1580 BTRFS_DEV_STAT_WRITE_ERRS);
David Sterbae37abe92018-04-04 17:20:52 +02001581 atomic64_inc(&fs_info->dev_replace.num_write_errors);
Stefan Behrens442a4f62012-05-25 16:06:08 +02001582 bio_put(bio);
1583 return -EIO;
1584 }
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001585 bio_put(bio);
1586 }
1587
1588 return 0;
1589}
1590
Stefan Behrensff023aa2012-11-06 11:43:11 +01001591static void scrub_write_block_to_dev_replace(struct scrub_block *sblock)
1592{
Jeff Mahoney0b246af2016-06-22 18:54:23 -04001593 struct btrfs_fs_info *fs_info = sblock->sctx->fs_info;
Stefan Behrensff023aa2012-11-06 11:43:11 +01001594 int page_num;
1595
Miao Xie5a6ac9e2014-11-06 17:20:58 +08001596	/*
1597	 * This block is used to check the parity on the source device, so
1598	 * the data need not be written to the destination device.
1599	 */
1600 if (sblock->sparity)
1601 return;
1602
Stefan Behrensff023aa2012-11-06 11:43:11 +01001603 for (page_num = 0; page_num < sblock->page_count; page_num++) {
1604 int ret;
1605
1606 ret = scrub_write_page_to_dev_replace(sblock, page_num);
1607 if (ret)
David Sterbae37abe92018-04-04 17:20:52 +02001608 atomic64_inc(&fs_info->dev_replace.num_write_errors);
Stefan Behrensff023aa2012-11-06 11:43:11 +01001609 }
1610}
1611
1612static int scrub_write_page_to_dev_replace(struct scrub_block *sblock,
1613 int page_num)
1614{
1615 struct scrub_page *spage = sblock->pagev[page_num];
1616
1617 BUG_ON(spage->page == NULL);
1618 if (spage->io_error) {
1619 void *mapped_buffer = kmap_atomic(spage->page);
1620
David Sterba619a9742017-03-29 20:48:44 +02001621 clear_page(mapped_buffer);
Stefan Behrensff023aa2012-11-06 11:43:11 +01001622 flush_dcache_page(spage->page);
1623 kunmap_atomic(mapped_buffer);
1624 }
1625 return scrub_add_page_to_wr_bio(sblock->sctx, spage);
1626}
1627
1628static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
1629 struct scrub_page *spage)
1630{
Stefan Behrensff023aa2012-11-06 11:43:11 +01001631 struct scrub_bio *sbio;
1632 int ret;
1633
David Sterba3fb99302017-05-16 19:10:32 +02001634 mutex_lock(&sctx->wr_lock);
Stefan Behrensff023aa2012-11-06 11:43:11 +01001635again:
David Sterba3fb99302017-05-16 19:10:32 +02001636 if (!sctx->wr_curr_bio) {
1637 sctx->wr_curr_bio = kzalloc(sizeof(*sctx->wr_curr_bio),
David Sterba58c4e172016-02-11 10:49:42 +01001638 GFP_KERNEL);
David Sterba3fb99302017-05-16 19:10:32 +02001639 if (!sctx->wr_curr_bio) {
1640 mutex_unlock(&sctx->wr_lock);
Stefan Behrensff023aa2012-11-06 11:43:11 +01001641 return -ENOMEM;
1642 }
David Sterba3fb99302017-05-16 19:10:32 +02001643 sctx->wr_curr_bio->sctx = sctx;
1644 sctx->wr_curr_bio->page_count = 0;
Stefan Behrensff023aa2012-11-06 11:43:11 +01001645 }
David Sterba3fb99302017-05-16 19:10:32 +02001646 sbio = sctx->wr_curr_bio;
Stefan Behrensff023aa2012-11-06 11:43:11 +01001647 if (sbio->page_count == 0) {
1648 struct bio *bio;
1649
1650 sbio->physical = spage->physical_for_dev_replace;
1651 sbio->logical = spage->logical;
David Sterba3fb99302017-05-16 19:10:32 +02001652 sbio->dev = sctx->wr_tgtdev;
Stefan Behrensff023aa2012-11-06 11:43:11 +01001653 bio = sbio->bio;
1654 if (!bio) {
David Sterbac5e4c3d2017-06-12 17:29:41 +02001655 bio = btrfs_io_bio_alloc(sctx->pages_per_wr_bio);
Stefan Behrensff023aa2012-11-06 11:43:11 +01001656 sbio->bio = bio;
1657 }
1658
1659 bio->bi_private = sbio;
1660 bio->bi_end_io = scrub_wr_bio_end_io;
Christoph Hellwig74d46992017-08-23 19:10:32 +02001661 bio_set_dev(bio, sbio->dev->bdev);
Kent Overstreet4f024f32013-10-11 15:44:27 -07001662 bio->bi_iter.bi_sector = sbio->physical >> 9;
David Sterbaebcc3262018-06-29 10:56:53 +02001663 bio->bi_opf = REQ_OP_WRITE;
Christoph Hellwig4e4cbee2017-06-03 09:38:06 +02001664 sbio->status = 0;
Stefan Behrensff023aa2012-11-06 11:43:11 +01001665 } else if (sbio->physical + sbio->page_count * PAGE_SIZE !=
1666 spage->physical_for_dev_replace ||
1667 sbio->logical + sbio->page_count * PAGE_SIZE !=
1668 spage->logical) {
1669 scrub_wr_submit(sctx);
1670 goto again;
1671 }
1672
1673 ret = bio_add_page(sbio->bio, spage->page, PAGE_SIZE, 0);
1674 if (ret != PAGE_SIZE) {
1675 if (sbio->page_count < 1) {
1676 bio_put(sbio->bio);
1677 sbio->bio = NULL;
David Sterba3fb99302017-05-16 19:10:32 +02001678 mutex_unlock(&sctx->wr_lock);
Stefan Behrensff023aa2012-11-06 11:43:11 +01001679 return -EIO;
1680 }
1681 scrub_wr_submit(sctx);
1682 goto again;
1683 }
1684
1685 sbio->pagev[sbio->page_count] = spage;
1686 scrub_page_get(spage);
1687 sbio->page_count++;
David Sterba3fb99302017-05-16 19:10:32 +02001688 if (sbio->page_count == sctx->pages_per_wr_bio)
Stefan Behrensff023aa2012-11-06 11:43:11 +01001689 scrub_wr_submit(sctx);
David Sterba3fb99302017-05-16 19:10:32 +02001690 mutex_unlock(&sctx->wr_lock);
Stefan Behrensff023aa2012-11-06 11:43:11 +01001691
1692 return 0;
1693}
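/*
 * Illustrative note: pages are appended to wr_curr_bio only while they
 * remain both physically and logically contiguous; any discontinuity,
 * a failed bio_add_page(), or a full bio (pages_per_wr_bio pages)
 * forces scrub_wr_submit() and a retry with a fresh bio, so every
 * submitted write covers one contiguous range on the target device.
 */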
1694
1695static void scrub_wr_submit(struct scrub_ctx *sctx)
1696{
Stefan Behrensff023aa2012-11-06 11:43:11 +01001697 struct scrub_bio *sbio;
1698
David Sterba3fb99302017-05-16 19:10:32 +02001699 if (!sctx->wr_curr_bio)
Stefan Behrensff023aa2012-11-06 11:43:11 +01001700 return;
1701
David Sterba3fb99302017-05-16 19:10:32 +02001702 sbio = sctx->wr_curr_bio;
1703 sctx->wr_curr_bio = NULL;
Christoph Hellwig74d46992017-08-23 19:10:32 +02001704 WARN_ON(!sbio->bio->bi_disk);
Stefan Behrensff023aa2012-11-06 11:43:11 +01001705 scrub_pending_bio_inc(sctx);
1706	/* Process all writes in a single worker thread. Then the block layer
1707	 * orders the requests before sending them to the driver; this
1708	 * doubled the write performance on spinning disks when measured
1709	 * with Linux 3.5. */
Mike Christie4e49ea42016-06-05 14:31:41 -05001710 btrfsic_submit_bio(sbio->bio);
Stefan Behrensff023aa2012-11-06 11:43:11 +01001711}
1712
Christoph Hellwig4246a0b2015-07-20 15:29:37 +02001713static void scrub_wr_bio_end_io(struct bio *bio)
Stefan Behrensff023aa2012-11-06 11:43:11 +01001714{
1715 struct scrub_bio *sbio = bio->bi_private;
Jeff Mahoneyfb456252016-06-22 18:54:56 -04001716 struct btrfs_fs_info *fs_info = sbio->dev->fs_info;
Stefan Behrensff023aa2012-11-06 11:43:11 +01001717
Christoph Hellwig4e4cbee2017-06-03 09:38:06 +02001718 sbio->status = bio->bi_status;
Stefan Behrensff023aa2012-11-06 11:43:11 +01001719 sbio->bio = bio;
1720
Liu Bo9e0af232014-08-15 23:36:53 +08001721 btrfs_init_work(&sbio->work, btrfs_scrubwrc_helper,
1722 scrub_wr_bio_end_io_worker, NULL, NULL);
Qu Wenruo0339ef22014-02-28 10:46:17 +08001723 btrfs_queue_work(fs_info->scrub_wr_completion_workers, &sbio->work);
Stefan Behrensff023aa2012-11-06 11:43:11 +01001724}
1725
1726static void scrub_wr_bio_end_io_worker(struct btrfs_work *work)
1727{
1728 struct scrub_bio *sbio = container_of(work, struct scrub_bio, work);
1729 struct scrub_ctx *sctx = sbio->sctx;
1730 int i;
1731
1732 WARN_ON(sbio->page_count > SCRUB_PAGES_PER_WR_BIO);
Christoph Hellwig4e4cbee2017-06-03 09:38:06 +02001733 if (sbio->status) {
Stefan Behrensff023aa2012-11-06 11:43:11 +01001734 struct btrfs_dev_replace *dev_replace =
Jeff Mahoneyfb456252016-06-22 18:54:56 -04001735 &sbio->sctx->fs_info->dev_replace;
Stefan Behrensff023aa2012-11-06 11:43:11 +01001736
1737 for (i = 0; i < sbio->page_count; i++) {
1738 struct scrub_page *spage = sbio->pagev[i];
1739
1740 spage->io_error = 1;
David Sterbae37abe92018-04-04 17:20:52 +02001741 atomic64_inc(&dev_replace->num_write_errors);
Stefan Behrensff023aa2012-11-06 11:43:11 +01001742 }
1743 }
1744
1745 for (i = 0; i < sbio->page_count; i++)
1746 scrub_page_put(sbio->pagev[i]);
1747
1748 bio_put(sbio->bio);
1749 kfree(sbio);
1750 scrub_pending_bio_dec(sctx);
1751}
1752
1753static int scrub_checksum(struct scrub_block *sblock)
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001754{
1755 u64 flags;
1756 int ret;
1757
Zhao Leiba7cf982015-08-24 21:18:02 +08001758	/*
1759	 * No need to initialize these stats currently,
1760	 * because this function only uses the return value
1761	 * instead of the stats values.
1762	 *
1763	 * Todo:
1764	 * always use the stats
1765	 */
1766 sblock->header_error = 0;
1767 sblock->generation_error = 0;
1768 sblock->checksum_error = 0;
1769
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001770 WARN_ON(sblock->page_count < 1);
1771 flags = sblock->pagev[0]->flags;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001772 ret = 0;
1773 if (flags & BTRFS_EXTENT_FLAG_DATA)
1774 ret = scrub_checksum_data(sblock);
1775 else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
1776 ret = scrub_checksum_tree_block(sblock);
1777 else if (flags & BTRFS_EXTENT_FLAG_SUPER)
1778 (void)scrub_checksum_super(sblock);
1779 else
1780 WARN_ON(1);
1781 if (ret)
1782 scrub_handle_errored_block(sblock);
Stefan Behrensff023aa2012-11-06 11:43:11 +01001783
1784 return ret;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001785}
1786
1787static int scrub_checksum_data(struct scrub_block *sblock)
1788{
Stefan Behrensd9d181c2012-11-02 09:58:09 +01001789 struct scrub_ctx *sctx = sblock->sctx;
Arne Jansena2de7332011-03-08 14:14:00 +01001790 u8 csum[BTRFS_CSUM_SIZE];
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001791 u8 *on_disk_csum;
1792 struct page *page;
1793 void *buffer;
Arne Jansena2de7332011-03-08 14:14:00 +01001794 u32 crc = ~(u32)0;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001795 u64 len;
1796 int index;
Arne Jansena2de7332011-03-08 14:14:00 +01001797
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001798 BUG_ON(sblock->page_count < 1);
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001799 if (!sblock->pagev[0]->have_csum)
Arne Jansena2de7332011-03-08 14:14:00 +01001800 return 0;
1801
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001802 on_disk_csum = sblock->pagev[0]->csum;
1803 page = sblock->pagev[0]->page;
Linus Torvalds9613beb2012-03-30 12:44:29 -07001804 buffer = kmap_atomic(page);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001805
David Sterba25cc1222017-05-16 19:10:41 +02001806 len = sctx->fs_info->sectorsize;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001807 index = 0;
1808 for (;;) {
1809 u64 l = min_t(u64, len, PAGE_SIZE);
1810
Liu Bob0496682013-03-14 14:57:45 +00001811 crc = btrfs_csum_data(buffer, crc, l);
Linus Torvalds9613beb2012-03-30 12:44:29 -07001812 kunmap_atomic(buffer);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001813 len -= l;
1814 if (len == 0)
1815 break;
1816 index++;
1817 BUG_ON(index >= sblock->page_count);
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001818 BUG_ON(!sblock->pagev[index]->page);
1819 page = sblock->pagev[index]->page;
Linus Torvalds9613beb2012-03-30 12:44:29 -07001820 buffer = kmap_atomic(page);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001821 }
1822
Arne Jansena2de7332011-03-08 14:14:00 +01001823 btrfs_csum_final(crc, csum);
Stefan Behrensd9d181c2012-11-02 09:58:09 +01001824 if (memcmp(csum, on_disk_csum, sctx->csum_size))
Zhao Leiba7cf982015-08-24 21:18:02 +08001825 sblock->checksum_error = 1;
Arne Jansena2de7332011-03-08 14:14:00 +01001826
Zhao Leiba7cf982015-08-24 21:18:02 +08001827 return sblock->checksum_error;
Arne Jansena2de7332011-03-08 14:14:00 +01001828}
1829
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001830static int scrub_checksum_tree_block(struct scrub_block *sblock)
Arne Jansena2de7332011-03-08 14:14:00 +01001831{
Stefan Behrensd9d181c2012-11-02 09:58:09 +01001832 struct scrub_ctx *sctx = sblock->sctx;
Arne Jansena2de7332011-03-08 14:14:00 +01001833 struct btrfs_header *h;
Jeff Mahoney0b246af2016-06-22 18:54:23 -04001834 struct btrfs_fs_info *fs_info = sctx->fs_info;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001835 u8 calculated_csum[BTRFS_CSUM_SIZE];
1836 u8 on_disk_csum[BTRFS_CSUM_SIZE];
1837 struct page *page;
1838 void *mapped_buffer;
1839 u64 mapped_size;
1840 void *p;
Arne Jansena2de7332011-03-08 14:14:00 +01001841 u32 crc = ~(u32)0;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001842 u64 len;
1843 int index;
1844
1845 BUG_ON(sblock->page_count < 1);
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001846 page = sblock->pagev[0]->page;
Linus Torvalds9613beb2012-03-30 12:44:29 -07001847 mapped_buffer = kmap_atomic(page);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001848 h = (struct btrfs_header *)mapped_buffer;
Stefan Behrensd9d181c2012-11-02 09:58:09 +01001849 memcpy(on_disk_csum, h->csum, sctx->csum_size);
Arne Jansena2de7332011-03-08 14:14:00 +01001850
1851 /*
1852 * we don't use the getter functions here, as we
1853 * a) don't have an extent buffer and
1854 * b) the page is already kmapped
1855 */
Qu Wenruo3cae2102013-07-16 11:19:18 +08001856 if (sblock->pagev[0]->logical != btrfs_stack_header_bytenr(h))
Zhao Leiba7cf982015-08-24 21:18:02 +08001857 sblock->header_error = 1;
Arne Jansena2de7332011-03-08 14:14:00 +01001858
Zhao Leiba7cf982015-08-24 21:18:02 +08001859 if (sblock->pagev[0]->generation != btrfs_stack_header_generation(h)) {
1860 sblock->header_error = 1;
1861 sblock->generation_error = 1;
1862 }
Arne Jansena2de7332011-03-08 14:14:00 +01001863
Miao Xie17a9be22014-07-24 11:37:08 +08001864 if (!scrub_check_fsid(h->fsid, sblock->pagev[0]))
Zhao Leiba7cf982015-08-24 21:18:02 +08001865 sblock->header_error = 1;
Arne Jansena2de7332011-03-08 14:14:00 +01001866
1867 if (memcmp(h->chunk_tree_uuid, fs_info->chunk_tree_uuid,
1868 BTRFS_UUID_SIZE))
Zhao Leiba7cf982015-08-24 21:18:02 +08001869 sblock->header_error = 1;
Arne Jansena2de7332011-03-08 14:14:00 +01001870
David Sterba25cc1222017-05-16 19:10:41 +02001871 len = sctx->fs_info->nodesize - BTRFS_CSUM_SIZE;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001872 mapped_size = PAGE_SIZE - BTRFS_CSUM_SIZE;
1873 p = ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE;
1874 index = 0;
1875 for (;;) {
1876 u64 l = min_t(u64, len, mapped_size);
1877
Liu Bob0496682013-03-14 14:57:45 +00001878 crc = btrfs_csum_data(p, crc, l);
Linus Torvalds9613beb2012-03-30 12:44:29 -07001879 kunmap_atomic(mapped_buffer);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001880 len -= l;
1881 if (len == 0)
1882 break;
1883 index++;
1884 BUG_ON(index >= sblock->page_count);
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001885 BUG_ON(!sblock->pagev[index]->page);
1886 page = sblock->pagev[index]->page;
Linus Torvalds9613beb2012-03-30 12:44:29 -07001887 mapped_buffer = kmap_atomic(page);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001888 mapped_size = PAGE_SIZE;
1889 p = mapped_buffer;
1890 }
1891
1892 btrfs_csum_final(crc, calculated_csum);
Stefan Behrensd9d181c2012-11-02 09:58:09 +01001893 if (memcmp(calculated_csum, on_disk_csum, sctx->csum_size))
Zhao Leiba7cf982015-08-24 21:18:02 +08001894 sblock->checksum_error = 1;
Arne Jansena2de7332011-03-08 14:14:00 +01001895
Zhao Leiba7cf982015-08-24 21:18:02 +08001896 return sblock->header_error || sblock->checksum_error;
Arne Jansena2de7332011-03-08 14:14:00 +01001897}
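/*
 * Worked example (illustrative, sizes assumed): with a 16K nodesize
 * and 4K pages, the loop above checksums 16384 - BTRFS_CSUM_SIZE
 * bytes: 4096 - BTRFS_CSUM_SIZE from the first page, then three full
 * pages, carrying the running crc across kmaps before
 * btrfs_csum_final() yields the value compared with the header csum.
 */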
1898
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001899static int scrub_checksum_super(struct scrub_block *sblock)
Arne Jansena2de7332011-03-08 14:14:00 +01001900{
1901 struct btrfs_super_block *s;
Stefan Behrensd9d181c2012-11-02 09:58:09 +01001902 struct scrub_ctx *sctx = sblock->sctx;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001903 u8 calculated_csum[BTRFS_CSUM_SIZE];
1904 u8 on_disk_csum[BTRFS_CSUM_SIZE];
1905 struct page *page;
1906 void *mapped_buffer;
1907 u64 mapped_size;
1908 void *p;
Arne Jansena2de7332011-03-08 14:14:00 +01001909 u32 crc = ~(u32)0;
Stefan Behrens442a4f62012-05-25 16:06:08 +02001910 int fail_gen = 0;
1911 int fail_cor = 0;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001912 u64 len;
1913 int index;
Arne Jansena2de7332011-03-08 14:14:00 +01001914
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001915 BUG_ON(sblock->page_count < 1);
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001916 page = sblock->pagev[0]->page;
Linus Torvalds9613beb2012-03-30 12:44:29 -07001917 mapped_buffer = kmap_atomic(page);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001918 s = (struct btrfs_super_block *)mapped_buffer;
Stefan Behrensd9d181c2012-11-02 09:58:09 +01001919 memcpy(on_disk_csum, s->csum, sctx->csum_size);
Arne Jansena2de7332011-03-08 14:14:00 +01001920
Qu Wenruo3cae2102013-07-16 11:19:18 +08001921 if (sblock->pagev[0]->logical != btrfs_super_bytenr(s))
Stefan Behrens442a4f62012-05-25 16:06:08 +02001922 ++fail_cor;
Arne Jansena2de7332011-03-08 14:14:00 +01001923
Qu Wenruo3cae2102013-07-16 11:19:18 +08001924 if (sblock->pagev[0]->generation != btrfs_super_generation(s))
Stefan Behrens442a4f62012-05-25 16:06:08 +02001925 ++fail_gen;
Arne Jansena2de7332011-03-08 14:14:00 +01001926
Miao Xie17a9be22014-07-24 11:37:08 +08001927 if (!scrub_check_fsid(s->fsid, sblock->pagev[0]))
Stefan Behrens442a4f62012-05-25 16:06:08 +02001928 ++fail_cor;
Arne Jansena2de7332011-03-08 14:14:00 +01001929
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001930 len = BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE;
1931 mapped_size = PAGE_SIZE - BTRFS_CSUM_SIZE;
1932 p = ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE;
1933 index = 0;
1934 for (;;) {
1935 u64 l = min_t(u64, len, mapped_size);
1936
Liu Bob0496682013-03-14 14:57:45 +00001937 crc = btrfs_csum_data(p, crc, l);
Linus Torvalds9613beb2012-03-30 12:44:29 -07001938 kunmap_atomic(mapped_buffer);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001939 len -= l;
1940 if (len == 0)
1941 break;
1942 index++;
1943 BUG_ON(index >= sblock->page_count);
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001944 BUG_ON(!sblock->pagev[index]->page);
1945 page = sblock->pagev[index]->page;
Linus Torvalds9613beb2012-03-30 12:44:29 -07001946 mapped_buffer = kmap_atomic(page);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001947 mapped_size = PAGE_SIZE;
1948 p = mapped_buffer;
1949 }
1950
1951 btrfs_csum_final(crc, calculated_csum);
Stefan Behrensd9d181c2012-11-02 09:58:09 +01001952 if (memcmp(calculated_csum, on_disk_csum, sctx->csum_size))
Stefan Behrens442a4f62012-05-25 16:06:08 +02001953 ++fail_cor;
Arne Jansena2de7332011-03-08 14:14:00 +01001954
Stefan Behrens442a4f62012-05-25 16:06:08 +02001955 if (fail_cor + fail_gen) {
Arne Jansena2de7332011-03-08 14:14:00 +01001956 /*
1957		 * If we find an error in a super block, we just report it.
1958		 * Super blocks get rewritten with the next transaction
1959		 * commit anyway.
1960 */
Stefan Behrensd9d181c2012-11-02 09:58:09 +01001961 spin_lock(&sctx->stat_lock);
1962 ++sctx->stat.super_errors;
1963 spin_unlock(&sctx->stat_lock);
Stefan Behrens442a4f62012-05-25 16:06:08 +02001964 if (fail_cor)
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001965 btrfs_dev_stat_inc_and_print(sblock->pagev[0]->dev,
Stefan Behrens442a4f62012-05-25 16:06:08 +02001966 BTRFS_DEV_STAT_CORRUPTION_ERRS);
1967 else
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001968 btrfs_dev_stat_inc_and_print(sblock->pagev[0]->dev,
Stefan Behrens442a4f62012-05-25 16:06:08 +02001969 BTRFS_DEV_STAT_GENERATION_ERRS);
Arne Jansena2de7332011-03-08 14:14:00 +01001970 }
1971
Stefan Behrens442a4f62012-05-25 16:06:08 +02001972 return fail_cor + fail_gen;
Arne Jansena2de7332011-03-08 14:14:00 +01001973}
1974
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001975static void scrub_block_get(struct scrub_block *sblock)
1976{
Elena Reshetova186debd2017-03-03 10:55:23 +02001977 refcount_inc(&sblock->refs);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001978}
1979
1980static void scrub_block_put(struct scrub_block *sblock)
1981{
Elena Reshetova186debd2017-03-03 10:55:23 +02001982 if (refcount_dec_and_test(&sblock->refs)) {
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001983 int i;
1984
Miao Xie5a6ac9e2014-11-06 17:20:58 +08001985 if (sblock->sparity)
1986 scrub_parity_put(sblock->sparity);
1987
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001988 for (i = 0; i < sblock->page_count; i++)
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001989 scrub_page_put(sblock->pagev[i]);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001990 kfree(sblock);
1991 }
1992}
1993
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001994static void scrub_page_get(struct scrub_page *spage)
1995{
Zhao Lei57019342015-01-20 15:11:45 +08001996 atomic_inc(&spage->refs);
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001997}
1998
1999static void scrub_page_put(struct scrub_page *spage)
2000{
Zhao Lei57019342015-01-20 15:11:45 +08002001 if (atomic_dec_and_test(&spage->refs)) {
Stefan Behrens7a9e9982012-11-02 14:58:04 +01002002 if (spage->page)
2003 __free_page(spage->page);
2004 kfree(spage);
2005 }
2006}
2007
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002008static void scrub_submit(struct scrub_ctx *sctx)
Arne Jansena2de7332011-03-08 14:14:00 +01002009{
2010 struct scrub_bio *sbio;
2011
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002012 if (sctx->curr == -1)
Stefan Behrens1623ede2012-03-27 14:21:26 -04002013 return;
Arne Jansena2de7332011-03-08 14:14:00 +01002014
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002015 sbio = sctx->bios[sctx->curr];
2016 sctx->curr = -1;
Stefan Behrensb6bfebc2012-11-02 16:44:58 +01002017 scrub_pending_bio_inc(sctx);
Mike Christie4e49ea42016-06-05 14:31:41 -05002018 btrfsic_submit_bio(sbio->bio);
Arne Jansena2de7332011-03-08 14:14:00 +01002019}
2020
Stefan Behrensff023aa2012-11-06 11:43:11 +01002021static int scrub_add_page_to_rd_bio(struct scrub_ctx *sctx,
2022 struct scrub_page *spage)
Arne Jansena2de7332011-03-08 14:14:00 +01002023{
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002024 struct scrub_block *sblock = spage->sblock;
Arne Jansena2de7332011-03-08 14:14:00 +01002025 struct scrub_bio *sbio;
Arne Jansen69f4cb52011-11-11 08:17:10 -05002026 int ret;
Arne Jansena2de7332011-03-08 14:14:00 +01002027
2028again:
2029 /*
2030 * grab a fresh bio or wait for one to become available
2031 */
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002032 while (sctx->curr == -1) {
2033 spin_lock(&sctx->list_lock);
2034 sctx->curr = sctx->first_free;
2035 if (sctx->curr != -1) {
2036 sctx->first_free = sctx->bios[sctx->curr]->next_free;
2037 sctx->bios[sctx->curr]->next_free = -1;
2038 sctx->bios[sctx->curr]->page_count = 0;
2039 spin_unlock(&sctx->list_lock);
Arne Jansena2de7332011-03-08 14:14:00 +01002040 } else {
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002041 spin_unlock(&sctx->list_lock);
2042 wait_event(sctx->list_wait, sctx->first_free != -1);
Arne Jansena2de7332011-03-08 14:14:00 +01002043 }
2044 }
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002045 sbio = sctx->bios[sctx->curr];
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002046 if (sbio->page_count == 0) {
Arne Jansen69f4cb52011-11-11 08:17:10 -05002047 struct bio *bio;
2048
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002049 sbio->physical = spage->physical;
2050 sbio->logical = spage->logical;
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01002051 sbio->dev = spage->dev;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002052 bio = sbio->bio;
2053 if (!bio) {
David Sterbac5e4c3d2017-06-12 17:29:41 +02002054 bio = btrfs_io_bio_alloc(sctx->pages_per_rd_bio);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002055 sbio->bio = bio;
2056 }
Arne Jansen69f4cb52011-11-11 08:17:10 -05002057
2058 bio->bi_private = sbio;
2059 bio->bi_end_io = scrub_bio_end_io;
Christoph Hellwig74d46992017-08-23 19:10:32 +02002060 bio_set_dev(bio, sbio->dev->bdev);
Kent Overstreet4f024f32013-10-11 15:44:27 -07002061 bio->bi_iter.bi_sector = sbio->physical >> 9;
David Sterbaebcc3262018-06-29 10:56:53 +02002062 bio->bi_opf = REQ_OP_READ;
Christoph Hellwig4e4cbee2017-06-03 09:38:06 +02002063 sbio->status = 0;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002064 } else if (sbio->physical + sbio->page_count * PAGE_SIZE !=
2065 spage->physical ||
2066 sbio->logical + sbio->page_count * PAGE_SIZE !=
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01002067 spage->logical ||
2068 sbio->dev != spage->dev) {
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002069 scrub_submit(sctx);
Arne Jansen69f4cb52011-11-11 08:17:10 -05002070 goto again;
2071 }
2072
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002073 sbio->pagev[sbio->page_count] = spage;
2074 ret = bio_add_page(sbio->bio, spage->page, PAGE_SIZE, 0);
2075 if (ret != PAGE_SIZE) {
2076 if (sbio->page_count < 1) {
2077 bio_put(sbio->bio);
2078 sbio->bio = NULL;
2079 return -EIO;
2080 }
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002081 scrub_submit(sctx);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002082 goto again;
Arne Jansena2de7332011-03-08 14:14:00 +01002083 }
Arne Jansen1bc87792011-05-28 21:57:55 +02002084
Stefan Behrensff023aa2012-11-06 11:43:11 +01002085 scrub_block_get(sblock); /* one for the page added to the bio */
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002086 atomic_inc(&sblock->outstanding_pages);
2087 sbio->page_count++;
Stefan Behrensff023aa2012-11-06 11:43:11 +01002088 if (sbio->page_count == sctx->pages_per_rd_bio)
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002089 scrub_submit(sctx);
Arne Jansena2de7332011-03-08 14:14:00 +01002090
2091 return 0;
2092}
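/*
 * Illustrative note: unlike the write path, the read-bio batch above
 * also breaks when the device changes (sbio->dev != spage->dev),
 * since a single bio can only be directed at one bdev.
 */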
2093
Linus Torvalds22365972015-09-05 15:14:43 -07002094static void scrub_missing_raid56_end_io(struct bio *bio)
Omar Sandoval73ff61d2015-06-19 11:52:51 -07002095{
2096 struct scrub_block *sblock = bio->bi_private;
Jeff Mahoneyfb456252016-06-22 18:54:56 -04002097 struct btrfs_fs_info *fs_info = sblock->sctx->fs_info;
Omar Sandoval73ff61d2015-06-19 11:52:51 -07002098
Christoph Hellwig4e4cbee2017-06-03 09:38:06 +02002099 if (bio->bi_status)
Omar Sandoval73ff61d2015-06-19 11:52:51 -07002100 sblock->no_io_error_seen = 0;
2101
Scott Talbert46732722016-05-09 09:14:28 -04002102 bio_put(bio);
2103
Omar Sandoval73ff61d2015-06-19 11:52:51 -07002104 btrfs_queue_work(fs_info->scrub_workers, &sblock->work);
2105}
2106
2107static void scrub_missing_raid56_worker(struct btrfs_work *work)
2108{
2109 struct scrub_block *sblock = container_of(work, struct scrub_block, work);
2110 struct scrub_ctx *sctx = sblock->sctx;
Jeff Mahoney0b246af2016-06-22 18:54:23 -04002111 struct btrfs_fs_info *fs_info = sctx->fs_info;
Omar Sandoval73ff61d2015-06-19 11:52:51 -07002112 u64 logical;
2113 struct btrfs_device *dev;
2114
Omar Sandoval73ff61d2015-06-19 11:52:51 -07002115 logical = sblock->pagev[0]->logical;
2116 dev = sblock->pagev[0]->dev;
2117
Zhao Leiaffe4a52015-08-24 21:32:06 +08002118 if (sblock->no_io_error_seen)
Zhao Leiba7cf982015-08-24 21:18:02 +08002119 scrub_recheck_block_checksum(sblock);
Omar Sandoval73ff61d2015-06-19 11:52:51 -07002120
2121 if (!sblock->no_io_error_seen) {
2122 spin_lock(&sctx->stat_lock);
2123 sctx->stat.read_errors++;
2124 spin_unlock(&sctx->stat_lock);
Jeff Mahoney0b246af2016-06-22 18:54:23 -04002125 btrfs_err_rl_in_rcu(fs_info,
David Sterbab14af3b2015-10-08 10:43:10 +02002126 "IO error rebuilding logical %llu for dev %s",
Omar Sandoval73ff61d2015-06-19 11:52:51 -07002127 logical, rcu_str_deref(dev->name));
2128 } else if (sblock->header_error || sblock->checksum_error) {
2129 spin_lock(&sctx->stat_lock);
2130 sctx->stat.uncorrectable_errors++;
2131 spin_unlock(&sctx->stat_lock);
Jeff Mahoney0b246af2016-06-22 18:54:23 -04002132 btrfs_err_rl_in_rcu(fs_info,
David Sterbab14af3b2015-10-08 10:43:10 +02002133 "failed to rebuild valid logical %llu for dev %s",
Omar Sandoval73ff61d2015-06-19 11:52:51 -07002134 logical, rcu_str_deref(dev->name));
2135 } else {
2136 scrub_write_block_to_dev_replace(sblock);
2137 }
2138
2139 scrub_block_put(sblock);
2140
David Sterba2073c4c2017-03-31 17:12:51 +02002141 if (sctx->is_dev_replace && sctx->flush_all_writes) {
David Sterba3fb99302017-05-16 19:10:32 +02002142 mutex_lock(&sctx->wr_lock);
Omar Sandoval73ff61d2015-06-19 11:52:51 -07002143 scrub_wr_submit(sctx);
David Sterba3fb99302017-05-16 19:10:32 +02002144 mutex_unlock(&sctx->wr_lock);
Omar Sandoval73ff61d2015-06-19 11:52:51 -07002145 }
2146
2147 scrub_pending_bio_dec(sctx);
2148}
2149
2150static void scrub_missing_raid56_pages(struct scrub_block *sblock)
2151{
2152 struct scrub_ctx *sctx = sblock->sctx;
Jeff Mahoneyfb456252016-06-22 18:54:56 -04002153 struct btrfs_fs_info *fs_info = sctx->fs_info;
Omar Sandoval73ff61d2015-06-19 11:52:51 -07002154 u64 length = sblock->page_count * PAGE_SIZE;
2155 u64 logical = sblock->pagev[0]->logical;
Zhao Leif1fee652016-05-17 17:37:38 +08002156 struct btrfs_bio *bbio = NULL;
Omar Sandoval73ff61d2015-06-19 11:52:51 -07002157 struct bio *bio;
2158 struct btrfs_raid_bio *rbio;
2159 int ret;
2160 int i;
2161
Qu Wenruoae6529c2017-03-29 09:33:21 +08002162 btrfs_bio_counter_inc_blocked(fs_info);
Christoph Hellwigcf8cddd2016-10-27 09:27:36 +02002163 ret = btrfs_map_sblock(fs_info, BTRFS_MAP_GET_READ_MIRRORS, logical,
David Sterba825ad4c2017-03-28 14:45:22 +02002164 &length, &bbio);
Omar Sandoval73ff61d2015-06-19 11:52:51 -07002165 if (ret || !bbio || !bbio->raid_map)
2166 goto bbio_out;
2167
2168 if (WARN_ON(!sctx->is_dev_replace ||
2169 !(bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK))) {
2170 /*
2171 * We shouldn't be scrubbing a missing device. Even for dev
2172 * replace, we should only get here for RAID 5/6. We either
2173 * managed to mount something with no mirrors remaining or
2174 * there's a bug in scrub_remap_extent()/btrfs_map_block().
2175 */
2176 goto bbio_out;
2177 }
2178
David Sterbac5e4c3d2017-06-12 17:29:41 +02002179 bio = btrfs_io_bio_alloc(0);
Omar Sandoval73ff61d2015-06-19 11:52:51 -07002180 bio->bi_iter.bi_sector = logical >> 9;
2181 bio->bi_private = sblock;
2182 bio->bi_end_io = scrub_missing_raid56_end_io;
2183
Jeff Mahoney2ff7e612016-06-22 18:54:24 -04002184 rbio = raid56_alloc_missing_rbio(fs_info, bio, bbio, length);
Omar Sandoval73ff61d2015-06-19 11:52:51 -07002185 if (!rbio)
2186 goto rbio_out;
2187
2188 for (i = 0; i < sblock->page_count; i++) {
2189 struct scrub_page *spage = sblock->pagev[i];
2190
2191 raid56_add_scrub_pages(rbio, spage->page, spage->logical);
2192 }
2193
2194 btrfs_init_work(&sblock->work, btrfs_scrub_helper,
2195 scrub_missing_raid56_worker, NULL, NULL);
2196 scrub_block_get(sblock);
2197 scrub_pending_bio_inc(sctx);
2198 raid56_submit_missing_rbio(rbio);
2199 return;
2200
2201rbio_out:
2202 bio_put(bio);
2203bbio_out:
Qu Wenruoae6529c2017-03-29 09:33:21 +08002204 btrfs_bio_counter_dec(fs_info);
Omar Sandoval73ff61d2015-06-19 11:52:51 -07002205 btrfs_put_bbio(bbio);
2206 spin_lock(&sctx->stat_lock);
2207 sctx->stat.malloc_errors++;
2208 spin_unlock(&sctx->stat_lock);
2209}
2210
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002211static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01002212 u64 physical, struct btrfs_device *dev, u64 flags,
Stefan Behrensff023aa2012-11-06 11:43:11 +01002213 u64 gen, int mirror_num, u8 *csum, int force,
2214 u64 physical_for_dev_replace)
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002215{
2216 struct scrub_block *sblock;
2217 int index;
2218
David Sterba58c4e172016-02-11 10:49:42 +01002219 sblock = kzalloc(sizeof(*sblock), GFP_KERNEL);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002220 if (!sblock) {
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002221 spin_lock(&sctx->stat_lock);
2222 sctx->stat.malloc_errors++;
2223 spin_unlock(&sctx->stat_lock);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002224 return -ENOMEM;
2225 }
2226
Stefan Behrens7a9e9982012-11-02 14:58:04 +01002227 /* one ref inside this function, plus one for each page added to
2228 * a bio later on */
Elena Reshetova186debd2017-03-03 10:55:23 +02002229 refcount_set(&sblock->refs, 1);
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002230 sblock->sctx = sctx;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002231 sblock->no_io_error_seen = 1;
2232
2233 for (index = 0; len > 0; index++) {
Stefan Behrens7a9e9982012-11-02 14:58:04 +01002234 struct scrub_page *spage;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002235 u64 l = min_t(u64, len, PAGE_SIZE);
2236
David Sterba58c4e172016-02-11 10:49:42 +01002237 spage = kzalloc(sizeof(*spage), GFP_KERNEL);
Stefan Behrens7a9e9982012-11-02 14:58:04 +01002238 if (!spage) {
2239leave_nomem:
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002240 spin_lock(&sctx->stat_lock);
2241 sctx->stat.malloc_errors++;
2242 spin_unlock(&sctx->stat_lock);
Stefan Behrens7a9e9982012-11-02 14:58:04 +01002243 scrub_block_put(sblock);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002244 return -ENOMEM;
2245 }
Stefan Behrens7a9e9982012-11-02 14:58:04 +01002246 BUG_ON(index >= SCRUB_MAX_PAGES_PER_BLOCK);
2247 scrub_page_get(spage);
2248 sblock->pagev[index] = spage;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002249 spage->sblock = sblock;
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01002250 spage->dev = dev;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002251 spage->flags = flags;
2252 spage->generation = gen;
2253 spage->logical = logical;
2254 spage->physical = physical;
Stefan Behrensff023aa2012-11-06 11:43:11 +01002255 spage->physical_for_dev_replace = physical_for_dev_replace;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002256 spage->mirror_num = mirror_num;
2257 if (csum) {
2258 spage->have_csum = 1;
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002259 memcpy(spage->csum, csum, sctx->csum_size);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002260 } else {
2261 spage->have_csum = 0;
2262 }
2263 sblock->page_count++;
David Sterba58c4e172016-02-11 10:49:42 +01002264 spage->page = alloc_page(GFP_KERNEL);
Stefan Behrens7a9e9982012-11-02 14:58:04 +01002265 if (!spage->page)
2266 goto leave_nomem;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002267 len -= l;
2268 logical += l;
2269 physical += l;
Stefan Behrensff023aa2012-11-06 11:43:11 +01002270 physical_for_dev_replace += l;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002271 }
2272
Stefan Behrens7a9e9982012-11-02 14:58:04 +01002273 WARN_ON(sblock->page_count == 0);
Anand Jaine6e674b2017-12-04 12:54:54 +08002274 if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state)) {
Omar Sandoval73ff61d2015-06-19 11:52:51 -07002275 /*
2276 * This case should only be hit for RAID 5/6 device replace. See
2277 * the comment in scrub_missing_raid56_pages() for details.
2278 */
2279 scrub_missing_raid56_pages(sblock);
2280 } else {
2281 for (index = 0; index < sblock->page_count; index++) {
2282 struct scrub_page *spage = sblock->pagev[index];
2283 int ret;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002284
Omar Sandoval73ff61d2015-06-19 11:52:51 -07002285 ret = scrub_add_page_to_rd_bio(sctx, spage);
2286 if (ret) {
2287 scrub_block_put(sblock);
2288 return ret;
2289 }
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002290 }
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002291
Omar Sandoval73ff61d2015-06-19 11:52:51 -07002292 if (force)
2293 scrub_submit(sctx);
2294 }
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002295
2296 /* last one frees, either here or in bio completion for last page */
2297 scrub_block_put(sblock);
2298 return 0;
2299}
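/*
 * Illustrative note: the reference taken when the block is allocated
 * is dropped at the end of scrub_pages(); the per-page references
 * taken in scrub_add_page_to_rd_bio() keep the block alive until bio
 * completion has processed its last page.
 */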
2300
Christoph Hellwig4246a0b2015-07-20 15:29:37 +02002301static void scrub_bio_end_io(struct bio *bio)
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002302{
2303 struct scrub_bio *sbio = bio->bi_private;
Jeff Mahoneyfb456252016-06-22 18:54:56 -04002304 struct btrfs_fs_info *fs_info = sbio->dev->fs_info;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002305
Christoph Hellwig4e4cbee2017-06-03 09:38:06 +02002306 sbio->status = bio->bi_status;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002307 sbio->bio = bio;
2308
Qu Wenruo0339ef22014-02-28 10:46:17 +08002309 btrfs_queue_work(fs_info->scrub_workers, &sbio->work);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002310}
2311
2312static void scrub_bio_end_io_worker(struct btrfs_work *work)
2313{
2314 struct scrub_bio *sbio = container_of(work, struct scrub_bio, work);
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002315 struct scrub_ctx *sctx = sbio->sctx;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002316 int i;
2317
Stefan Behrensff023aa2012-11-06 11:43:11 +01002318 BUG_ON(sbio->page_count > SCRUB_PAGES_PER_RD_BIO);
Christoph Hellwig4e4cbee2017-06-03 09:38:06 +02002319 if (sbio->status) {
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002320 for (i = 0; i < sbio->page_count; i++) {
2321 struct scrub_page *spage = sbio->pagev[i];
2322
2323 spage->io_error = 1;
2324 spage->sblock->no_io_error_seen = 0;
2325 }
2326 }
2327
2328 /* now complete the scrub_block items that have all pages completed */
2329 for (i = 0; i < sbio->page_count; i++) {
2330 struct scrub_page *spage = sbio->pagev[i];
2331 struct scrub_block *sblock = spage->sblock;
2332
2333 if (atomic_dec_and_test(&sblock->outstanding_pages))
2334 scrub_block_complete(sblock);
2335 scrub_block_put(sblock);
2336 }
2337
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002338 bio_put(sbio->bio);
2339 sbio->bio = NULL;
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002340 spin_lock(&sctx->list_lock);
2341 sbio->next_free = sctx->first_free;
2342 sctx->first_free = sbio->index;
2343 spin_unlock(&sctx->list_lock);
Stefan Behrensff023aa2012-11-06 11:43:11 +01002344
David Sterba2073c4c2017-03-31 17:12:51 +02002345 if (sctx->is_dev_replace && sctx->flush_all_writes) {
David Sterba3fb99302017-05-16 19:10:32 +02002346 mutex_lock(&sctx->wr_lock);
Stefan Behrensff023aa2012-11-06 11:43:11 +01002347 scrub_wr_submit(sctx);
David Sterba3fb99302017-05-16 19:10:32 +02002348 mutex_unlock(&sctx->wr_lock);
Stefan Behrensff023aa2012-11-06 11:43:11 +01002349 }
2350
Stefan Behrensb6bfebc2012-11-02 16:44:58 +01002351 scrub_pending_bio_dec(sctx);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002352}
2353
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002354static inline void __scrub_mark_bitmap(struct scrub_parity *sparity,
2355 unsigned long *bitmap,
2356 u64 start, u64 len)
2357{
Liu Bo972d7212017-04-03 13:45:33 -07002358 u64 offset;
David Sterba7736b0a2017-03-31 18:02:48 +02002359 u64 nsectors64;
2360 u32 nsectors;
Jeff Mahoneyda170662016-06-15 09:22:56 -04002361 int sectorsize = sparity->sctx->fs_info->sectorsize;
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002362
2363 if (len >= sparity->stripe_len) {
2364 bitmap_set(bitmap, 0, sparity->nsectors);
2365 return;
2366 }
2367
2368 start -= sparity->logic_start;
Liu Bo972d7212017-04-03 13:45:33 -07002369 start = div64_u64_rem(start, sparity->stripe_len, &offset);
2370 offset = div_u64(offset, sectorsize);
David Sterba7736b0a2017-03-31 18:02:48 +02002371 nsectors64 = div_u64(len, sectorsize);
2372
2373 ASSERT(nsectors64 < UINT_MAX);
2374 nsectors = (u32)nsectors64;
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002375
2376 if (offset + nsectors <= sparity->nsectors) {
2377 bitmap_set(bitmap, offset, nsectors);
2378 return;
2379 }
2380
2381 bitmap_set(bitmap, offset, sparity->nsectors - offset);
2382 bitmap_set(bitmap, 0, nsectors - (sparity->nsectors - offset));
2383}
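/*
 * Worked example (illustrative, values assumed): with stripe_len 64K,
 * 4K sectors (nsectors = 16), logic_start = 0, start = 57344 (56K)
 * and len = 16384, offset becomes sector 14 and nsectors 4, so the
 * final branch sets sectors 14-15 and wraps around to set sectors
 * 0-1 of the bitmap.
 */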
2384
2385static inline void scrub_parity_mark_sectors_error(struct scrub_parity *sparity,
2386 u64 start, u64 len)
2387{
2388 __scrub_mark_bitmap(sparity, sparity->ebitmap, start, len);
2389}
2390
2391static inline void scrub_parity_mark_sectors_data(struct scrub_parity *sparity,
2392 u64 start, u64 len)
2393{
2394 __scrub_mark_bitmap(sparity, sparity->dbitmap, start, len);
2395}
2396
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002397static void scrub_block_complete(struct scrub_block *sblock)
2398{
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002399 int corrupted = 0;
2400
Stefan Behrensff023aa2012-11-06 11:43:11 +01002401 if (!sblock->no_io_error_seen) {
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002402 corrupted = 1;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002403 scrub_handle_errored_block(sblock);
Stefan Behrensff023aa2012-11-06 11:43:11 +01002404 } else {
2405		/*
2406		 * In the dev-replace case: if the block has a checksum
2407		 * error, write it via the repair mechanism; otherwise
2408		 * write it out here.
2409		 */
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002410 corrupted = scrub_checksum(sblock);
2411 if (!corrupted && sblock->sctx->is_dev_replace)
Stefan Behrensff023aa2012-11-06 11:43:11 +01002412 scrub_write_block_to_dev_replace(sblock);
2413 }
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002414
2415 if (sblock->sparity && corrupted && !sblock->data_corrected) {
2416 u64 start = sblock->pagev[0]->logical;
2417 u64 end = sblock->pagev[sblock->page_count - 1]->logical +
2418 PAGE_SIZE;
2419
2420 scrub_parity_mark_sectors_error(sblock->sparity,
2421 start, end - start);
2422 }
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002423}
2424
Zhao Lei3b5753e2015-08-24 22:03:02 +08002425static int scrub_find_csum(struct scrub_ctx *sctx, u64 logical, u8 *csum)
Arne Jansena2de7332011-03-08 14:14:00 +01002426{
2427 struct btrfs_ordered_sum *sum = NULL;
Miao Xief51a4a12013-06-19 10:36:09 +08002428 unsigned long index;
Arne Jansena2de7332011-03-08 14:14:00 +01002429 unsigned long num_sectors;
Arne Jansena2de7332011-03-08 14:14:00 +01002430
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002431 while (!list_empty(&sctx->csum_list)) {
2432 sum = list_first_entry(&sctx->csum_list,
Arne Jansena2de7332011-03-08 14:14:00 +01002433 struct btrfs_ordered_sum, list);
2434 if (sum->bytenr > logical)
2435 return 0;
2436 if (sum->bytenr + sum->len > logical)
2437 break;
2438
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002439 ++sctx->stat.csum_discards;
Arne Jansena2de7332011-03-08 14:14:00 +01002440 list_del(&sum->list);
2441 kfree(sum);
2442 sum = NULL;
2443 }
2444 if (!sum)
2445 return 0;
2446
David Sterba1d1bf922017-03-31 18:02:48 +02002447 index = div_u64(logical - sum->bytenr, sctx->fs_info->sectorsize);
2448 ASSERT(index < UINT_MAX);
2449
David Sterba25cc1222017-05-16 19:10:41 +02002450 num_sectors = sum->len / sctx->fs_info->sectorsize;
Miao Xief51a4a12013-06-19 10:36:09 +08002451 memcpy(csum, sum->sums + index, sctx->csum_size);
2452 if (index == num_sectors - 1) {
Arne Jansena2de7332011-03-08 14:14:00 +01002453 list_del(&sum->list);
2454 kfree(sum);
2455 }
Miao Xief51a4a12013-06-19 10:36:09 +08002456 return 1;
Arne Jansena2de7332011-03-08 14:14:00 +01002457}
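/*
 * Worked example (illustrative, not part of the original source): with
 * sectorsize = 4K and a csum_list entry covering bytenr = 1M, len = 64K,
 * a lookup for logical = 1M + 8K yields index = 2 and num_sectors = 16;
 * csum_size bytes are copied from sum->sums + 2, and the entry stays on
 * the list because index != num_sectors - 1.
 */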
2458
2459/* scrub_extent() tries to collect up to 64 kB for each bio */
Liu Bo6ca17652018-03-07 12:08:09 -07002460static int scrub_extent(struct scrub_ctx *sctx, struct map_lookup *map,
2461 u64 logical, u64 len,
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01002462 u64 physical, struct btrfs_device *dev, u64 flags,
Stefan Behrensff023aa2012-11-06 11:43:11 +01002463 u64 gen, int mirror_num, u64 physical_for_dev_replace)
Arne Jansena2de7332011-03-08 14:14:00 +01002464{
2465 int ret;
2466 u8 csum[BTRFS_CSUM_SIZE];
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002467 u32 blocksize;
2468
2469 if (flags & BTRFS_EXTENT_FLAG_DATA) {
Liu Bo6ca17652018-03-07 12:08:09 -07002470 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
2471 blocksize = map->stripe_len;
2472 else
2473 blocksize = sctx->fs_info->sectorsize;
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002474 spin_lock(&sctx->stat_lock);
2475 sctx->stat.data_extents_scrubbed++;
2476 sctx->stat.data_bytes_scrubbed += len;
2477 spin_unlock(&sctx->stat_lock);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002478 } else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
Liu Bo6ca17652018-03-07 12:08:09 -07002479 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
2480 blocksize = map->stripe_len;
2481 else
2482 blocksize = sctx->fs_info->nodesize;
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002483 spin_lock(&sctx->stat_lock);
2484 sctx->stat.tree_extents_scrubbed++;
2485 sctx->stat.tree_bytes_scrubbed += len;
2486 spin_unlock(&sctx->stat_lock);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002487 } else {
David Sterba25cc1222017-05-16 19:10:41 +02002488 blocksize = sctx->fs_info->sectorsize;
Stefan Behrensff023aa2012-11-06 11:43:11 +01002489 WARN_ON(1);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002490 }
Arne Jansena2de7332011-03-08 14:14:00 +01002491
2492 while (len) {
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002493 u64 l = min_t(u64, len, blocksize);
Arne Jansena2de7332011-03-08 14:14:00 +01002494 int have_csum = 0;
2495
2496 if (flags & BTRFS_EXTENT_FLAG_DATA) {
2497 /* push csums to sbio */
Zhao Lei3b5753e2015-08-24 22:03:02 +08002498 have_csum = scrub_find_csum(sctx, logical, csum);
Arne Jansena2de7332011-03-08 14:14:00 +01002499 if (have_csum == 0)
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002500 ++sctx->stat.no_csum;
Arne Jansena2de7332011-03-08 14:14:00 +01002501 }
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01002502 ret = scrub_pages(sctx, logical, l, physical, dev, flags, gen,
Stefan Behrensff023aa2012-11-06 11:43:11 +01002503 mirror_num, have_csum ? csum : NULL, 0,
2504 physical_for_dev_replace);
Arne Jansena2de7332011-03-08 14:14:00 +01002505 if (ret)
2506 return ret;
2507 len -= l;
2508 logical += l;
2509 physical += l;
Stefan Behrensff023aa2012-11-06 11:43:11 +01002510 physical_for_dev_replace += l;
Arne Jansena2de7332011-03-08 14:14:00 +01002511 }
2512 return 0;
2513}
2514
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002515static int scrub_pages_for_parity(struct scrub_parity *sparity,
2516 u64 logical, u64 len,
2517 u64 physical, struct btrfs_device *dev,
2518 u64 flags, u64 gen, int mirror_num, u8 *csum)
2519{
2520 struct scrub_ctx *sctx = sparity->sctx;
2521 struct scrub_block *sblock;
2522 int index;
2523
David Sterba58c4e172016-02-11 10:49:42 +01002524 sblock = kzalloc(sizeof(*sblock), GFP_KERNEL);
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002525 if (!sblock) {
2526 spin_lock(&sctx->stat_lock);
2527 sctx->stat.malloc_errors++;
2528 spin_unlock(&sctx->stat_lock);
2529 return -ENOMEM;
2530 }
2531
2532 /* one ref inside this function, plus one for each page added to
2533 * a bio later on */
Elena Reshetova186debd2017-03-03 10:55:23 +02002534 refcount_set(&sblock->refs, 1);
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002535 sblock->sctx = sctx;
2536 sblock->no_io_error_seen = 1;
2537 sblock->sparity = sparity;
2538 scrub_parity_get(sparity);
2539
2540 for (index = 0; len > 0; index++) {
2541 struct scrub_page *spage;
2542 u64 l = min_t(u64, len, PAGE_SIZE);
2543
David Sterba58c4e172016-02-11 10:49:42 +01002544 spage = kzalloc(sizeof(*spage), GFP_KERNEL);
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002545 if (!spage) {
2546leave_nomem:
2547 spin_lock(&sctx->stat_lock);
2548 sctx->stat.malloc_errors++;
2549 spin_unlock(&sctx->stat_lock);
2550 scrub_block_put(sblock);
2551 return -ENOMEM;
2552 }
2553 BUG_ON(index >= SCRUB_MAX_PAGES_PER_BLOCK);
2554 /* For scrub block */
2555 scrub_page_get(spage);
2556 sblock->pagev[index] = spage;
2557 /* For scrub parity */
2558 scrub_page_get(spage);
2559 list_add_tail(&spage->list, &sparity->spages);
2560 spage->sblock = sblock;
2561 spage->dev = dev;
2562 spage->flags = flags;
2563 spage->generation = gen;
2564 spage->logical = logical;
2565 spage->physical = physical;
2566 spage->mirror_num = mirror_num;
2567 if (csum) {
2568 spage->have_csum = 1;
2569 memcpy(spage->csum, csum, sctx->csum_size);
2570 } else {
2571 spage->have_csum = 0;
2572 }
2573 sblock->page_count++;
David Sterba58c4e172016-02-11 10:49:42 +01002574 spage->page = alloc_page(GFP_KERNEL);
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002575 if (!spage->page)
2576 goto leave_nomem;
2577 len -= l;
2578 logical += l;
2579 physical += l;
2580 }
2581
2582 WARN_ON(sblock->page_count == 0);
2583 for (index = 0; index < sblock->page_count; index++) {
2584 struct scrub_page *spage = sblock->pagev[index];
2585 int ret;
2586
2587 ret = scrub_add_page_to_rd_bio(sctx, spage);
2588 if (ret) {
2589 scrub_block_put(sblock);
2590 return ret;
2591 }
2592 }
2593
2594 /* last one frees, either here or in bio completion for last page */
2595 scrub_block_put(sblock);
2596 return 0;
2597}
2598
2599static int scrub_extent_for_parity(struct scrub_parity *sparity,
2600 u64 logical, u64 len,
2601 u64 physical, struct btrfs_device *dev,
2602 u64 flags, u64 gen, int mirror_num)
2603{
2604 struct scrub_ctx *sctx = sparity->sctx;
2605 int ret;
2606 u8 csum[BTRFS_CSUM_SIZE];
2607 u32 blocksize;
2608
Anand Jaine6e674b2017-12-04 12:54:54 +08002609 if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state)) {
Omar Sandoval4a770892015-06-19 11:52:52 -07002610 scrub_parity_mark_sectors_error(sparity, logical, len);
2611 return 0;
2612 }
2613
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002614 if (flags & BTRFS_EXTENT_FLAG_DATA) {
Liu Bo6ca17652018-03-07 12:08:09 -07002615 blocksize = sparity->stripe_len;
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002616 } else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
Liu Bo6ca17652018-03-07 12:08:09 -07002617 blocksize = sparity->stripe_len;
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002618 } else {
David Sterba25cc1222017-05-16 19:10:41 +02002619 blocksize = sctx->fs_info->sectorsize;
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002620 WARN_ON(1);
2621 }
2622
2623 while (len) {
2624 u64 l = min_t(u64, len, blocksize);
2625 int have_csum = 0;
2626
2627 if (flags & BTRFS_EXTENT_FLAG_DATA) {
2628 /* push csums to sbio */
Zhao Lei3b5753e2015-08-24 22:03:02 +08002629 have_csum = scrub_find_csum(sctx, logical, csum);
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002630 if (have_csum == 0)
2631 goto skip;
2632 }
2633 ret = scrub_pages_for_parity(sparity, logical, l, physical, dev,
2634 flags, gen, mirror_num,
2635 have_csum ? csum : NULL);
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002636 if (ret)
2637 return ret;
Dan Carpenter6b6d24b2014-12-12 22:30:00 +03002638skip:
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002639 len -= l;
2640 logical += l;
2641 physical += l;
2642 }
2643 return 0;
2644}
2645
Wang Shilong3b080b22014-04-01 18:01:43 +08002646/*
2647 * Given a physical address, this calculates its
2648 * logical offset. If this is a parity stripe, it will return
2649 * the leftmost data stripe's logical offset.
2650 *
2651 * Return 0 if it is a data stripe, 1 if it is a parity stripe.
2652 */
2653static int get_raid56_logic_offset(u64 physical, int num,
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002654 struct map_lookup *map, u64 *offset,
2655 u64 *stripe_start)
Wang Shilong3b080b22014-04-01 18:01:43 +08002656{
2657 int i;
2658 int j = 0;
2659 u64 stripe_nr;
2660 u64 last_offset;
David Sterba9d644a62015-02-20 18:42:11 +01002661 u32 stripe_index;
2662 u32 rot;
Wang Shilong3b080b22014-04-01 18:01:43 +08002663
2664 last_offset = (physical - map->stripes[num].physical) *
2665 nr_data_stripes(map);
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002666 if (stripe_start)
2667 *stripe_start = last_offset;
2668
Wang Shilong3b080b22014-04-01 18:01:43 +08002669 *offset = last_offset;
2670 for (i = 0; i < nr_data_stripes(map); i++) {
2671 *offset = last_offset + i * map->stripe_len;
2672
Liu Bo42c61ab2017-04-03 13:45:24 -07002673 stripe_nr = div64_u64(*offset, map->stripe_len);
David Sterbab8b93ad2015-01-16 17:26:13 +01002674 stripe_nr = div_u64(stripe_nr, nr_data_stripes(map));
Wang Shilong3b080b22014-04-01 18:01:43 +08002675
2676 /* Work out the disk rotation on this stripe-set */
David Sterba47c57132015-02-20 18:43:47 +01002677 stripe_nr = div_u64_rem(stripe_nr, map->num_stripes, &rot);
Wang Shilong3b080b22014-04-01 18:01:43 +08002678	 /* calculate which stripe this data is located on */
2679 rot += i;
Wang Shilonge4fbaee2014-04-11 18:32:25 +08002680 stripe_index = rot % map->num_stripes;
Wang Shilong3b080b22014-04-01 18:01:43 +08002681 if (stripe_index == num)
2682 return 0;
2683 if (stripe_index < num)
2684 j++;
2685 }
2686 *offset = last_offset + j * map->stripe_len;
2687 return 1;
2688}
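/*
 * Worked example (assumed geometry, for illustration only): RAID5 with
 * num_stripes = 3 (nr_data_stripes = 2), stripe_len = 64K, num = 0 and
 * a physical address 128K into the device extent. last_offset becomes
 * 128K * 2 = 256K; the i = 0 candidate (offset 256K) rotates to
 * stripe_index 2, while i = 1 (offset 320K) rotates to stripe_index
 * 0 == num, so the function returns 0 (data stripe) with *offset = 320K.
 */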
2689
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002690static void scrub_free_parity(struct scrub_parity *sparity)
2691{
2692 struct scrub_ctx *sctx = sparity->sctx;
2693 struct scrub_page *curr, *next;
2694 int nbits;
2695
2696 nbits = bitmap_weight(sparity->ebitmap, sparity->nsectors);
2697 if (nbits) {
2698 spin_lock(&sctx->stat_lock);
2699 sctx->stat.read_errors += nbits;
2700 sctx->stat.uncorrectable_errors += nbits;
2701 spin_unlock(&sctx->stat_lock);
2702 }
2703
2704 list_for_each_entry_safe(curr, next, &sparity->spages, list) {
2705 list_del_init(&curr->list);
2706 scrub_page_put(curr);
2707 }
2708
2709 kfree(sparity);
2710}
2711
Zhao Lei20b2e302015-06-04 20:09:15 +08002712static void scrub_parity_bio_endio_worker(struct btrfs_work *work)
2713{
2714 struct scrub_parity *sparity = container_of(work, struct scrub_parity,
2715 work);
2716 struct scrub_ctx *sctx = sparity->sctx;
2717
2718 scrub_free_parity(sparity);
2719 scrub_pending_bio_dec(sctx);
2720}
2721
Christoph Hellwig4246a0b2015-07-20 15:29:37 +02002722static void scrub_parity_bio_endio(struct bio *bio)
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002723{
2724 struct scrub_parity *sparity = (struct scrub_parity *)bio->bi_private;
Jeff Mahoney0b246af2016-06-22 18:54:23 -04002725 struct btrfs_fs_info *fs_info = sparity->sctx->fs_info;
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002726
Christoph Hellwig4e4cbee2017-06-03 09:38:06 +02002727 if (bio->bi_status)
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002728 bitmap_or(sparity->ebitmap, sparity->ebitmap, sparity->dbitmap,
2729 sparity->nsectors);
2730
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002731 bio_put(bio);
Zhao Lei20b2e302015-06-04 20:09:15 +08002732
2733 btrfs_init_work(&sparity->work, btrfs_scrubparity_helper,
2734 scrub_parity_bio_endio_worker, NULL, NULL);
Jeff Mahoney0b246af2016-06-22 18:54:23 -04002735 btrfs_queue_work(fs_info->scrub_parity_workers, &sparity->work);
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002736}
2737
2738static void scrub_parity_check_and_repair(struct scrub_parity *sparity)
2739{
2740 struct scrub_ctx *sctx = sparity->sctx;
Jeff Mahoney0b246af2016-06-22 18:54:23 -04002741 struct btrfs_fs_info *fs_info = sctx->fs_info;
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002742 struct bio *bio;
2743 struct btrfs_raid_bio *rbio;
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002744 struct btrfs_bio *bbio = NULL;
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002745 u64 length;
2746 int ret;
2747
2748 if (!bitmap_andnot(sparity->dbitmap, sparity->dbitmap, sparity->ebitmap,
2749 sparity->nsectors))
2750 goto out;
2751
Zhao Leia0dd59d2015-07-21 15:42:26 +08002752 length = sparity->logic_end - sparity->logic_start;
Qu Wenruoae6529c2017-03-29 09:33:21 +08002753
2754 btrfs_bio_counter_inc_blocked(fs_info);
Jeff Mahoney0b246af2016-06-22 18:54:23 -04002755 ret = btrfs_map_sblock(fs_info, BTRFS_MAP_WRITE, sparity->logic_start,
David Sterba825ad4c2017-03-28 14:45:22 +02002756 &length, &bbio);
Zhao Lei8e5cfb52015-01-20 15:11:33 +08002757 if (ret || !bbio || !bbio->raid_map)
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002758 goto bbio_out;
2759
David Sterbac5e4c3d2017-06-12 17:29:41 +02002760 bio = btrfs_io_bio_alloc(0);
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002761 bio->bi_iter.bi_sector = sparity->logic_start >> 9;
2762 bio->bi_private = sparity;
2763 bio->bi_end_io = scrub_parity_bio_endio;
2764
Jeff Mahoney2ff7e612016-06-22 18:54:24 -04002765 rbio = raid56_parity_alloc_scrub_rbio(fs_info, bio, bbio,
Zhao Lei8e5cfb52015-01-20 15:11:33 +08002766 length, sparity->scrub_dev,
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002767 sparity->dbitmap,
2768 sparity->nsectors);
2769 if (!rbio)
2770 goto rbio_out;
2771
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002772 scrub_pending_bio_inc(sctx);
2773 raid56_parity_submit_scrub_rbio(rbio);
2774 return;
2775
2776rbio_out:
2777 bio_put(bio);
2778bbio_out:
Qu Wenruoae6529c2017-03-29 09:33:21 +08002779 btrfs_bio_counter_dec(fs_info);
Zhao Lei6e9606d2015-01-20 15:11:34 +08002780 btrfs_put_bbio(bbio);
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002781 bitmap_or(sparity->ebitmap, sparity->ebitmap, sparity->dbitmap,
2782 sparity->nsectors);
2783 spin_lock(&sctx->stat_lock);
2784 sctx->stat.malloc_errors++;
2785 spin_unlock(&sctx->stat_lock);
2786out:
2787 scrub_free_parity(sparity);
2788}
2789
2790static inline int scrub_calc_parity_bitmap_len(int nsectors)
2791{
Zhao Leibfca9a62014-12-08 19:55:57 +08002792 return DIV_ROUND_UP(nsectors, BITS_PER_LONG) * sizeof(long);
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002793}
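/*
 * Example (illustrative): 64K stripes with 4K sectors give nsectors = 16,
 * so on a 64-bit kernel (BITS_PER_LONG = 64) this rounds up to one
 * unsigned long, i.e. 8 bytes per bitmap.
 */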
2794
2795static void scrub_parity_get(struct scrub_parity *sparity)
2796{
Elena Reshetova78a76452017-03-03 10:55:24 +02002797 refcount_inc(&sparity->refs);
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002798}
2799
2800static void scrub_parity_put(struct scrub_parity *sparity)
2801{
Elena Reshetova78a76452017-03-03 10:55:24 +02002802 if (!refcount_dec_and_test(&sparity->refs))
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002803 return;
2804
2805 scrub_parity_check_and_repair(sparity);
2806}
2807
2808static noinline_for_stack int scrub_raid56_parity(struct scrub_ctx *sctx,
2809 struct map_lookup *map,
2810 struct btrfs_device *sdev,
2811 struct btrfs_path *path,
2812 u64 logic_start,
2813 u64 logic_end)
2814{
Jeff Mahoneyfb456252016-06-22 18:54:56 -04002815 struct btrfs_fs_info *fs_info = sctx->fs_info;
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002816 struct btrfs_root *root = fs_info->extent_root;
2817 struct btrfs_root *csum_root = fs_info->csum_root;
2818 struct btrfs_extent_item *extent;
Omar Sandoval4a770892015-06-19 11:52:52 -07002819 struct btrfs_bio *bbio = NULL;
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002820 u64 flags;
2821 int ret;
2822 int slot;
2823 struct extent_buffer *l;
2824 struct btrfs_key key;
2825 u64 generation;
2826 u64 extent_logical;
2827 u64 extent_physical;
2828 u64 extent_len;
Omar Sandoval4a770892015-06-19 11:52:52 -07002829 u64 mapped_length;
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002830 struct btrfs_device *extent_dev;
2831 struct scrub_parity *sparity;
2832 int nsectors;
2833 int bitmap_len;
2834 int extent_mirror_num;
2835 int stop_loop = 0;
2836
Jeff Mahoney0b246af2016-06-22 18:54:23 -04002837 nsectors = div_u64(map->stripe_len, fs_info->sectorsize);
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002838 bitmap_len = scrub_calc_parity_bitmap_len(nsectors);
2839 sparity = kzalloc(sizeof(struct scrub_parity) + 2 * bitmap_len,
2840 GFP_NOFS);
2841 if (!sparity) {
2842 spin_lock(&sctx->stat_lock);
2843 sctx->stat.malloc_errors++;
2844 spin_unlock(&sctx->stat_lock);
2845 return -ENOMEM;
2846 }
2847
2848 sparity->stripe_len = map->stripe_len;
2849 sparity->nsectors = nsectors;
2850 sparity->sctx = sctx;
2851 sparity->scrub_dev = sdev;
2852 sparity->logic_start = logic_start;
2853 sparity->logic_end = logic_end;
Elena Reshetova78a76452017-03-03 10:55:24 +02002854 refcount_set(&sparity->refs, 1);
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002855 INIT_LIST_HEAD(&sparity->spages);
2856 sparity->dbitmap = sparity->bitmap;
2857 sparity->ebitmap = (void *)sparity->bitmap + bitmap_len;
2858
2859 ret = 0;
2860 while (logic_start < logic_end) {
2861 if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
2862 key.type = BTRFS_METADATA_ITEM_KEY;
2863 else
2864 key.type = BTRFS_EXTENT_ITEM_KEY;
2865 key.objectid = logic_start;
2866 key.offset = (u64)-1;
2867
2868 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2869 if (ret < 0)
2870 goto out;
2871
2872 if (ret > 0) {
2873 ret = btrfs_previous_extent_item(root, path, 0);
2874 if (ret < 0)
2875 goto out;
2876 if (ret > 0) {
2877 btrfs_release_path(path);
2878 ret = btrfs_search_slot(NULL, root, &key,
2879 path, 0, 0);
2880 if (ret < 0)
2881 goto out;
2882 }
2883 }
2884
2885 stop_loop = 0;
2886 while (1) {
2887 u64 bytes;
2888
2889 l = path->nodes[0];
2890 slot = path->slots[0];
2891 if (slot >= btrfs_header_nritems(l)) {
2892 ret = btrfs_next_leaf(root, path);
2893 if (ret == 0)
2894 continue;
2895 if (ret < 0)
2896 goto out;
2897
2898 stop_loop = 1;
2899 break;
2900 }
2901 btrfs_item_key_to_cpu(l, &key, slot);
2902
Zhao Leid7cad232015-07-22 13:14:48 +08002903 if (key.type != BTRFS_EXTENT_ITEM_KEY &&
2904 key.type != BTRFS_METADATA_ITEM_KEY)
2905 goto next;
2906
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002907 if (key.type == BTRFS_METADATA_ITEM_KEY)
Jeff Mahoney0b246af2016-06-22 18:54:23 -04002908 bytes = fs_info->nodesize;
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002909 else
2910 bytes = key.offset;
2911
2912 if (key.objectid + bytes <= logic_start)
2913 goto next;
2914
Zhao Leia0dd59d2015-07-21 15:42:26 +08002915 if (key.objectid >= logic_end) {
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002916 stop_loop = 1;
2917 break;
2918 }
2919
2920 while (key.objectid >= logic_start + map->stripe_len)
2921 logic_start += map->stripe_len;
2922
2923 extent = btrfs_item_ptr(l, slot,
2924 struct btrfs_extent_item);
2925 flags = btrfs_extent_flags(l, extent);
2926 generation = btrfs_extent_generation(l, extent);
2927
Zhao Leia323e812015-07-23 12:29:49 +08002928 if ((flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) &&
2929 (key.objectid < logic_start ||
2930 key.objectid + bytes >
2931 logic_start + map->stripe_len)) {
Jeff Mahoney5d163e02016-09-20 10:05:00 -04002932 btrfs_err(fs_info,
2933 "scrub: tree block %llu spanning stripes, ignored. logical=%llu",
Zhao Leia323e812015-07-23 12:29:49 +08002934 key.objectid, logic_start);
Zhao Lei9799d2c32015-08-25 21:31:40 +08002935 spin_lock(&sctx->stat_lock);
2936 sctx->stat.uncorrectable_errors++;
2937 spin_unlock(&sctx->stat_lock);
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002938 goto next;
2939 }
2940again:
2941 extent_logical = key.objectid;
2942 extent_len = bytes;
2943
2944 if (extent_logical < logic_start) {
2945 extent_len -= logic_start - extent_logical;
2946 extent_logical = logic_start;
2947 }
2948
2949 if (extent_logical + extent_len >
2950 logic_start + map->stripe_len)
2951 extent_len = logic_start + map->stripe_len -
2952 extent_logical;
2953
2954 scrub_parity_mark_sectors_data(sparity, extent_logical,
2955 extent_len);
2956
Omar Sandoval4a770892015-06-19 11:52:52 -07002957 mapped_length = extent_len;
Zhao Leif1fee652016-05-17 17:37:38 +08002958 bbio = NULL;
Christoph Hellwigcf8cddd2016-10-27 09:27:36 +02002959 ret = btrfs_map_block(fs_info, BTRFS_MAP_READ,
2960 extent_logical, &mapped_length, &bbio,
2961 0);
Omar Sandoval4a770892015-06-19 11:52:52 -07002962 if (!ret) {
2963 if (!bbio || mapped_length < extent_len)
2964 ret = -EIO;
2965 }
2966 if (ret) {
2967 btrfs_put_bbio(bbio);
2968 goto out;
2969 }
2970 extent_physical = bbio->stripes[0].physical;
2971 extent_mirror_num = bbio->mirror_num;
2972 extent_dev = bbio->stripes[0].dev;
2973 btrfs_put_bbio(bbio);
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002974
2975 ret = btrfs_lookup_csums_range(csum_root,
2976 extent_logical,
2977 extent_logical + extent_len - 1,
2978 &sctx->csum_list, 1);
2979 if (ret)
2980 goto out;
2981
2982 ret = scrub_extent_for_parity(sparity, extent_logical,
2983 extent_len,
2984 extent_physical,
2985 extent_dev, flags,
2986 generation,
2987 extent_mirror_num);
Zhao Lei6fa96d72015-07-21 12:22:30 +08002988
2989 scrub_free_csums(sctx);
2990
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002991 if (ret)
2992 goto out;
2993
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002994 if (extent_logical + extent_len <
2995 key.objectid + bytes) {
2996 logic_start += map->stripe_len;
2997
2998 if (logic_start >= logic_end) {
2999 stop_loop = 1;
3000 break;
3001 }
3002
3003 if (logic_start < key.objectid + bytes) {
3004 cond_resched();
3005 goto again;
3006 }
3007 }
3008next:
3009 path->slots[0]++;
3010 }
3011
3012 btrfs_release_path(path);
3013
3014 if (stop_loop)
3015 break;
3016
3017 logic_start += map->stripe_len;
3018 }
3019out:
3020 if (ret < 0)
3021 scrub_parity_mark_sectors_error(sparity, logic_start,
Zhao Leia0dd59d2015-07-21 15:42:26 +08003022 logic_end - logic_start);
Miao Xie5a6ac9e2014-11-06 17:20:58 +08003023 scrub_parity_put(sparity);
3024 scrub_submit(sctx);
David Sterba3fb99302017-05-16 19:10:32 +02003025 mutex_lock(&sctx->wr_lock);
Miao Xie5a6ac9e2014-11-06 17:20:58 +08003026 scrub_wr_submit(sctx);
David Sterba3fb99302017-05-16 19:10:32 +02003027 mutex_unlock(&sctx->wr_lock);
Miao Xie5a6ac9e2014-11-06 17:20:58 +08003028
3029 btrfs_release_path(path);
3030 return ret < 0 ? ret : 0;
3031}
3032
Stefan Behrensd9d181c2012-11-02 09:58:09 +01003033static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01003034 struct map_lookup *map,
3035 struct btrfs_device *scrub_dev,
Omar Sandoval32934282018-08-14 11:09:52 -07003036 int num, u64 base, u64 length)
Arne Jansena2de7332011-03-08 14:14:00 +01003037{
Miao Xie5a6ac9e2014-11-06 17:20:58 +08003038 struct btrfs_path *path, *ppath;
Jeff Mahoneyfb456252016-06-22 18:54:56 -04003039 struct btrfs_fs_info *fs_info = sctx->fs_info;
Arne Jansena2de7332011-03-08 14:14:00 +01003040 struct btrfs_root *root = fs_info->extent_root;
3041 struct btrfs_root *csum_root = fs_info->csum_root;
3042 struct btrfs_extent_item *extent;
Arne Jansene7786c32011-05-28 20:58:38 +00003043 struct blk_plug plug;
Arne Jansena2de7332011-03-08 14:14:00 +01003044 u64 flags;
3045 int ret;
3046 int slot;
Arne Jansena2de7332011-03-08 14:14:00 +01003047 u64 nstripes;
Arne Jansena2de7332011-03-08 14:14:00 +01003048 struct extent_buffer *l;
Arne Jansena2de7332011-03-08 14:14:00 +01003049 u64 physical;
3050 u64 logical;
Liu Bo625f1c8d2013-04-27 02:56:57 +00003051 u64 logic_end;
Wang Shilong3b080b22014-04-01 18:01:43 +08003052 u64 physical_end;
Arne Jansena2de7332011-03-08 14:14:00 +01003053 u64 generation;
Jan Schmidte12fa9c2011-06-17 15:55:21 +02003054 int mirror_num;
Arne Jansen7a262852011-06-10 12:39:23 +02003055 struct reada_control *reada1;
3056 struct reada_control *reada2;
David Sterbae6c11f92016-03-24 18:00:53 +01003057 struct btrfs_key key;
Arne Jansen7a262852011-06-10 12:39:23 +02003058 struct btrfs_key key_end;
Arne Jansena2de7332011-03-08 14:14:00 +01003059 u64 increment = map->stripe_len;
3060 u64 offset;
Stefan Behrensff023aa2012-11-06 11:43:11 +01003061 u64 extent_logical;
3062 u64 extent_physical;
3063 u64 extent_len;
Miao Xie5a6ac9e2014-11-06 17:20:58 +08003064 u64 stripe_logical;
3065 u64 stripe_end;
Stefan Behrensff023aa2012-11-06 11:43:11 +01003066 struct btrfs_device *extent_dev;
3067 int extent_mirror_num;
Wang Shilong3b080b22014-04-01 18:01:43 +08003068 int stop_loop = 0;
David Woodhouse53b381b2013-01-29 18:40:14 -05003069
Wang Shilong3b080b22014-04-01 18:01:43 +08003070 physical = map->stripes[num].physical;
Arne Jansena2de7332011-03-08 14:14:00 +01003071 offset = 0;
Liu Bo42c61ab2017-04-03 13:45:24 -07003072 nstripes = div64_u64(length, map->stripe_len);
Arne Jansena2de7332011-03-08 14:14:00 +01003073 if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
3074 offset = map->stripe_len * num;
3075 increment = map->stripe_len * map->num_stripes;
Jan Schmidt193ea742011-06-13 19:56:54 +02003076 mirror_num = 1;
Arne Jansena2de7332011-03-08 14:14:00 +01003077 } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
3078 int factor = map->num_stripes / map->sub_stripes;
3079 offset = map->stripe_len * (num / map->sub_stripes);
3080 increment = map->stripe_len * factor;
Jan Schmidt193ea742011-06-13 19:56:54 +02003081 mirror_num = num % map->sub_stripes + 1;
Arne Jansena2de7332011-03-08 14:14:00 +01003082 } else if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
3083 increment = map->stripe_len;
Jan Schmidt193ea742011-06-13 19:56:54 +02003084 mirror_num = num % map->num_stripes + 1;
Arne Jansena2de7332011-03-08 14:14:00 +01003085 } else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
3086 increment = map->stripe_len;
Jan Schmidt193ea742011-06-13 19:56:54 +02003087 mirror_num = num % map->num_stripes + 1;
Zhao Leiffe2d202015-01-20 15:11:44 +08003088 } else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
Miao Xie5a6ac9e2014-11-06 17:20:58 +08003089 get_raid56_logic_offset(physical, num, map, &offset, NULL);
Wang Shilong3b080b22014-04-01 18:01:43 +08003090 increment = map->stripe_len * nr_data_stripes(map);
3091 mirror_num = 1;
Arne Jansena2de7332011-03-08 14:14:00 +01003092 } else {
3093 increment = map->stripe_len;
Jan Schmidt193ea742011-06-13 19:56:54 +02003094 mirror_num = 1;
Arne Jansena2de7332011-03-08 14:14:00 +01003095 }
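/*
 * Worked example for the RAID10 branch above (assumed geometry, for
 * illustration only): num_stripes = 4, sub_stripes = 2 and
 * stripe_len = 64K give factor = 2, so for num = 3 we get
 * offset = 64K * (3 / 2) = 64K, increment = 64K * 2 = 128K and
 * mirror_num = 3 % 2 + 1 = 2, i.e. the second copy of the second
 * stripe pair.
 */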
3096
3097 path = btrfs_alloc_path();
3098 if (!path)
3099 return -ENOMEM;
3100
Miao Xie5a6ac9e2014-11-06 17:20:58 +08003101 ppath = btrfs_alloc_path();
3102 if (!ppath) {
Tsutomu Itoh379d6852015-01-09 17:37:52 +09003103 btrfs_free_path(path);
Miao Xie5a6ac9e2014-11-06 17:20:58 +08003104 return -ENOMEM;
3105 }
3106
Stefan Behrensb5d67f62012-03-27 14:21:27 -04003107 /*
3108	 * Work on the commit root. The related disk blocks are static as
3109	 * long as COW is applied. This means it is safe to rewrite
3110	 * them to repair disk errors without any race conditions.
3111 */
Arne Jansena2de7332011-03-08 14:14:00 +01003112 path->search_commit_root = 1;
3113 path->skip_locking = 1;
3114
Gui Hecheng063c54d2015-01-09 09:39:40 +08003115 ppath->search_commit_root = 1;
3116 ppath->skip_locking = 1;
Arne Jansena2de7332011-03-08 14:14:00 +01003117 /*
Arne Jansen7a262852011-06-10 12:39:23 +02003118	 * trigger readahead for the extent tree and csum tree and wait for
3119	 * completion. During readahead, the scrub is officially paused
3120	 * to not hold off transaction commits.
Arne Jansena2de7332011-03-08 14:14:00 +01003121 */
3122 logical = base + offset;
Wang Shilong3b080b22014-04-01 18:01:43 +08003123 physical_end = physical + nstripes * map->stripe_len;
Zhao Leiffe2d202015-01-20 15:11:44 +08003124 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
Wang Shilong3b080b22014-04-01 18:01:43 +08003125 get_raid56_logic_offset(physical_end, num,
Miao Xie5a6ac9e2014-11-06 17:20:58 +08003126 map, &logic_end, NULL);
Wang Shilong3b080b22014-04-01 18:01:43 +08003127 logic_end += base;
3128 } else {
3129 logic_end = logical + increment * nstripes;
3130 }
Stefan Behrensd9d181c2012-11-02 09:58:09 +01003131 wait_event(sctx->list_wait,
Stefan Behrensb6bfebc2012-11-02 16:44:58 +01003132 atomic_read(&sctx->bios_in_flight) == 0);
Wang Shilongcb7ab022013-12-04 21:16:53 +08003133 scrub_blocked_if_needed(fs_info);
Arne Jansena2de7332011-03-08 14:14:00 +01003134
Arne Jansen7a262852011-06-10 12:39:23 +02003135 /* FIXME it might be better to start readahead at commit root */
David Sterbae6c11f92016-03-24 18:00:53 +01003136 key.objectid = logical;
3137 key.type = BTRFS_EXTENT_ITEM_KEY;
3138 key.offset = (u64)0;
Wang Shilong3b080b22014-04-01 18:01:43 +08003139 key_end.objectid = logic_end;
Josef Bacik3173a182013-03-07 14:22:04 -05003140 key_end.type = BTRFS_METADATA_ITEM_KEY;
3141 key_end.offset = (u64)-1;
David Sterbae6c11f92016-03-24 18:00:53 +01003142 reada1 = btrfs_reada_add(root, &key, &key_end);
Arne Jansena2de7332011-03-08 14:14:00 +01003143
David Sterbae6c11f92016-03-24 18:00:53 +01003144 key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
3145 key.type = BTRFS_EXTENT_CSUM_KEY;
3146 key.offset = logical;
Arne Jansen7a262852011-06-10 12:39:23 +02003147 key_end.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
3148 key_end.type = BTRFS_EXTENT_CSUM_KEY;
Wang Shilong3b080b22014-04-01 18:01:43 +08003149 key_end.offset = logic_end;
David Sterbae6c11f92016-03-24 18:00:53 +01003150 reada2 = btrfs_reada_add(csum_root, &key, &key_end);
Arne Jansena2de7332011-03-08 14:14:00 +01003151
Arne Jansen7a262852011-06-10 12:39:23 +02003152 if (!IS_ERR(reada1))
3153 btrfs_reada_wait(reada1);
3154 if (!IS_ERR(reada2))
3155 btrfs_reada_wait(reada2);
Arne Jansena2de7332011-03-08 14:14:00 +01003156
Arne Jansena2de7332011-03-08 14:14:00 +01003157
3158 /*
3159 * collect all data csums for the stripe to avoid seeking during
3160	 * the scrub. This might currently (crc32) end up being about 1MB.
3161 */
Arne Jansene7786c32011-05-28 20:58:38 +00003162 blk_start_plug(&plug);
Arne Jansena2de7332011-03-08 14:14:00 +01003163
Arne Jansena2de7332011-03-08 14:14:00 +01003164 /*
3165 * now find all extents for each stripe and scrub them
3166 */
Arne Jansena2de7332011-03-08 14:14:00 +01003167 ret = 0;
Wang Shilong3b080b22014-04-01 18:01:43 +08003168 while (physical < physical_end) {
Arne Jansena2de7332011-03-08 14:14:00 +01003169 /*
3170 * canceled?
3171 */
3172 if (atomic_read(&fs_info->scrub_cancel_req) ||
Stefan Behrensd9d181c2012-11-02 09:58:09 +01003173 atomic_read(&sctx->cancel_req)) {
Arne Jansena2de7332011-03-08 14:14:00 +01003174 ret = -ECANCELED;
3175 goto out;
3176 }
3177 /*
3178 * check to see if we have to pause
3179 */
3180 if (atomic_read(&fs_info->scrub_pause_req)) {
3181 /* push queued extents */
David Sterba2073c4c2017-03-31 17:12:51 +02003182 sctx->flush_all_writes = true;
Stefan Behrensd9d181c2012-11-02 09:58:09 +01003183 scrub_submit(sctx);
David Sterba3fb99302017-05-16 19:10:32 +02003184 mutex_lock(&sctx->wr_lock);
Stefan Behrensff023aa2012-11-06 11:43:11 +01003185 scrub_wr_submit(sctx);
David Sterba3fb99302017-05-16 19:10:32 +02003186 mutex_unlock(&sctx->wr_lock);
Stefan Behrensd9d181c2012-11-02 09:58:09 +01003187 wait_event(sctx->list_wait,
Stefan Behrensb6bfebc2012-11-02 16:44:58 +01003188 atomic_read(&sctx->bios_in_flight) == 0);
David Sterba2073c4c2017-03-31 17:12:51 +02003189 sctx->flush_all_writes = false;
Wang Shilong3cb09292013-12-04 21:15:19 +08003190 scrub_blocked_if_needed(fs_info);
Arne Jansena2de7332011-03-08 14:14:00 +01003191 }
3192
Zhao Leif2f66a22015-07-21 12:22:29 +08003193 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
3194 ret = get_raid56_logic_offset(physical, num, map,
3195 &logical,
3196 &stripe_logical);
3197 logical += base;
3198 if (ret) {
Zhao Lei79553232015-08-18 17:54:30 +08003199	 /* it is a parity stripe */
Zhao Leif2f66a22015-07-21 12:22:29 +08003200 stripe_logical += base;
Zhao Leia0dd59d2015-07-21 15:42:26 +08003201 stripe_end = stripe_logical + increment;
Zhao Leif2f66a22015-07-21 12:22:29 +08003202 ret = scrub_raid56_parity(sctx, map, scrub_dev,
3203 ppath, stripe_logical,
3204 stripe_end);
3205 if (ret)
3206 goto out;
3207 goto skip;
3208 }
3209 }
3210
Wang Shilong7c76edb2014-01-12 21:38:32 +08003211 if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
3212 key.type = BTRFS_METADATA_ITEM_KEY;
3213 else
3214 key.type = BTRFS_EXTENT_ITEM_KEY;
Arne Jansena2de7332011-03-08 14:14:00 +01003215 key.objectid = logical;
Liu Bo625f1c8d2013-04-27 02:56:57 +00003216 key.offset = (u64)-1;
Arne Jansena2de7332011-03-08 14:14:00 +01003217
3218 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3219 if (ret < 0)
3220 goto out;
Josef Bacik3173a182013-03-07 14:22:04 -05003221
Arne Jansen8c510322011-06-03 10:09:26 +02003222 if (ret > 0) {
Wang Shilongade2e0b2014-01-12 21:38:33 +08003223 ret = btrfs_previous_extent_item(root, path, 0);
Arne Jansena2de7332011-03-08 14:14:00 +01003224 if (ret < 0)
3225 goto out;
Arne Jansen8c510322011-06-03 10:09:26 +02003226 if (ret > 0) {
3227 /* there's no smaller item, so stick with the
3228 * larger one */
3229 btrfs_release_path(path);
3230 ret = btrfs_search_slot(NULL, root, &key,
3231 path, 0, 0);
3232 if (ret < 0)
3233 goto out;
3234 }
Arne Jansena2de7332011-03-08 14:14:00 +01003235 }
3236
Liu Bo625f1c8d2013-04-27 02:56:57 +00003237 stop_loop = 0;
Arne Jansena2de7332011-03-08 14:14:00 +01003238 while (1) {
Josef Bacik3173a182013-03-07 14:22:04 -05003239 u64 bytes;
3240
Arne Jansena2de7332011-03-08 14:14:00 +01003241 l = path->nodes[0];
3242 slot = path->slots[0];
3243 if (slot >= btrfs_header_nritems(l)) {
3244 ret = btrfs_next_leaf(root, path);
3245 if (ret == 0)
3246 continue;
3247 if (ret < 0)
3248 goto out;
3249
Liu Bo625f1c8d2013-04-27 02:56:57 +00003250 stop_loop = 1;
Arne Jansena2de7332011-03-08 14:14:00 +01003251 break;
3252 }
3253 btrfs_item_key_to_cpu(l, &key, slot);
3254
Zhao Leid7cad232015-07-22 13:14:48 +08003255 if (key.type != BTRFS_EXTENT_ITEM_KEY &&
3256 key.type != BTRFS_METADATA_ITEM_KEY)
3257 goto next;
3258
Josef Bacik3173a182013-03-07 14:22:04 -05003259 if (key.type == BTRFS_METADATA_ITEM_KEY)
Jeff Mahoney0b246af2016-06-22 18:54:23 -04003260 bytes = fs_info->nodesize;
Josef Bacik3173a182013-03-07 14:22:04 -05003261 else
3262 bytes = key.offset;
3263
3264 if (key.objectid + bytes <= logical)
Arne Jansena2de7332011-03-08 14:14:00 +01003265 goto next;
3266
Liu Bo625f1c8d2013-04-27 02:56:57 +00003267 if (key.objectid >= logical + map->stripe_len) {
3268 /* out of this device extent */
3269 if (key.objectid >= logic_end)
3270 stop_loop = 1;
3271 break;
3272 }
Arne Jansena2de7332011-03-08 14:14:00 +01003273
3274 extent = btrfs_item_ptr(l, slot,
3275 struct btrfs_extent_item);
3276 flags = btrfs_extent_flags(l, extent);
3277 generation = btrfs_extent_generation(l, extent);
3278
Zhao Leia323e812015-07-23 12:29:49 +08003279 if ((flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) &&
3280 (key.objectid < logical ||
3281 key.objectid + bytes >
3282 logical + map->stripe_len)) {
Frank Holtonefe120a2013-12-20 11:37:06 -05003283 btrfs_err(fs_info,
Jeff Mahoney5d163e02016-09-20 10:05:00 -04003284 "scrub: tree block %llu spanning stripes, ignored. logical=%llu",
Geert Uytterhoevenc1c9ff72013-08-20 13:20:07 +02003285 key.objectid, logical);
Zhao Lei9799d2c32015-08-25 21:31:40 +08003286 spin_lock(&sctx->stat_lock);
3287 sctx->stat.uncorrectable_errors++;
3288 spin_unlock(&sctx->stat_lock);
Arne Jansena2de7332011-03-08 14:14:00 +01003289 goto next;
3290 }
3291
Liu Bo625f1c8d2013-04-27 02:56:57 +00003292again:
3293 extent_logical = key.objectid;
3294 extent_len = bytes;
3295
Arne Jansena2de7332011-03-08 14:14:00 +01003296 /*
3297 * trim extent to this stripe
3298 */
Liu Bo625f1c8d2013-04-27 02:56:57 +00003299 if (extent_logical < logical) {
3300 extent_len -= logical - extent_logical;
3301 extent_logical = logical;
Arne Jansena2de7332011-03-08 14:14:00 +01003302 }
Liu Bo625f1c8d2013-04-27 02:56:57 +00003303 if (extent_logical + extent_len >
Arne Jansena2de7332011-03-08 14:14:00 +01003304 logical + map->stripe_len) {
Liu Bo625f1c8d2013-04-27 02:56:57 +00003305 extent_len = logical + map->stripe_len -
3306 extent_logical;
Arne Jansena2de7332011-03-08 14:14:00 +01003307 }
3308
Liu Bo625f1c8d2013-04-27 02:56:57 +00003309 extent_physical = extent_logical - logical + physical;
Stefan Behrensff023aa2012-11-06 11:43:11 +01003310 extent_dev = scrub_dev;
3311 extent_mirror_num = mirror_num;
Omar Sandoval32934282018-08-14 11:09:52 -07003312 if (sctx->is_dev_replace)
Stefan Behrensff023aa2012-11-06 11:43:11 +01003313 scrub_remap_extent(fs_info, extent_logical,
3314 extent_len, &extent_physical,
3315 &extent_dev,
3316 &extent_mirror_num);
Liu Bo625f1c8d2013-04-27 02:56:57 +00003317
Zhao Leife8cf652015-07-22 13:14:47 +08003318 ret = btrfs_lookup_csums_range(csum_root,
3319 extent_logical,
3320 extent_logical +
3321 extent_len - 1,
3322 &sctx->csum_list, 1);
Arne Jansena2de7332011-03-08 14:14:00 +01003323 if (ret)
3324 goto out;
3325
Liu Bo6ca17652018-03-07 12:08:09 -07003326 ret = scrub_extent(sctx, map, extent_logical, extent_len,
Liu Bo625f1c8d2013-04-27 02:56:57 +00003327 extent_physical, extent_dev, flags,
3328 generation, extent_mirror_num,
Stefan Behrens115930c2013-07-04 16:14:23 +02003329 extent_logical - logical + physical);
Zhao Lei6fa96d72015-07-21 12:22:30 +08003330
3331 scrub_free_csums(sctx);
3332
Liu Bo625f1c8d2013-04-27 02:56:57 +00003333 if (ret)
3334 goto out;
3335
3336 if (extent_logical + extent_len <
3337 key.objectid + bytes) {
Zhao Leiffe2d202015-01-20 15:11:44 +08003338 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
Wang Shilong3b080b22014-04-01 18:01:43 +08003339 /*
3340 * loop until we find next data stripe
3341 * or we have finished all stripes.
3342 */
Miao Xie5a6ac9e2014-11-06 17:20:58 +08003343loop:
3344 physical += map->stripe_len;
3345 ret = get_raid56_logic_offset(physical,
3346 num, map, &logical,
3347 &stripe_logical);
3348 logical += base;
3349
3350 if (ret && physical < physical_end) {
3351 stripe_logical += base;
3352 stripe_end = stripe_logical +
Zhao Leia0dd59d2015-07-21 15:42:26 +08003353 increment;
Miao Xie5a6ac9e2014-11-06 17:20:58 +08003354 ret = scrub_raid56_parity(sctx,
3355 map, scrub_dev, ppath,
3356 stripe_logical,
3357 stripe_end);
3358 if (ret)
3359 goto out;
3360 goto loop;
3361 }
Wang Shilong3b080b22014-04-01 18:01:43 +08003362 } else {
3363 physical += map->stripe_len;
3364 logical += increment;
3365 }
Liu Bo625f1c8d2013-04-27 02:56:57 +00003366 if (logical < key.objectid + bytes) {
3367 cond_resched();
3368 goto again;
3369 }
3370
Wang Shilong3b080b22014-04-01 18:01:43 +08003371 if (physical >= physical_end) {
Liu Bo625f1c8d2013-04-27 02:56:57 +00003372 stop_loop = 1;
3373 break;
3374 }
3375 }
Arne Jansena2de7332011-03-08 14:14:00 +01003376next:
3377 path->slots[0]++;
3378 }
Chris Mason71267332011-05-23 06:30:52 -04003379 btrfs_release_path(path);
Wang Shilong3b080b22014-04-01 18:01:43 +08003380skip:
Arne Jansena2de7332011-03-08 14:14:00 +01003381 logical += increment;
3382 physical += map->stripe_len;
Stefan Behrensd9d181c2012-11-02 09:58:09 +01003383 spin_lock(&sctx->stat_lock);
Liu Bo625f1c8d2013-04-27 02:56:57 +00003384 if (stop_loop)
3385 sctx->stat.last_physical = map->stripes[num].physical +
3386 length;
3387 else
3388 sctx->stat.last_physical = physical;
Stefan Behrensd9d181c2012-11-02 09:58:09 +01003389 spin_unlock(&sctx->stat_lock);
Liu Bo625f1c8d2013-04-27 02:56:57 +00003390 if (stop_loop)
3391 break;
Arne Jansena2de7332011-03-08 14:14:00 +01003392 }
Stefan Behrensff023aa2012-11-06 11:43:11 +01003393out:
Arne Jansena2de7332011-03-08 14:14:00 +01003394 /* push queued extents */
Stefan Behrensd9d181c2012-11-02 09:58:09 +01003395 scrub_submit(sctx);
David Sterba3fb99302017-05-16 19:10:32 +02003396 mutex_lock(&sctx->wr_lock);
Stefan Behrensff023aa2012-11-06 11:43:11 +01003397 scrub_wr_submit(sctx);
David Sterba3fb99302017-05-16 19:10:32 +02003398 mutex_unlock(&sctx->wr_lock);
Arne Jansena2de7332011-03-08 14:14:00 +01003399
Arne Jansene7786c32011-05-28 20:58:38 +00003400 blk_finish_plug(&plug);
Arne Jansena2de7332011-03-08 14:14:00 +01003401 btrfs_free_path(path);
Miao Xie5a6ac9e2014-11-06 17:20:58 +08003402 btrfs_free_path(ppath);
Arne Jansena2de7332011-03-08 14:14:00 +01003403 return ret < 0 ? ret : 0;
3404}
3405
Stefan Behrensd9d181c2012-11-02 09:58:09 +01003406static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx,
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01003407 struct btrfs_device *scrub_dev,
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01003408 u64 chunk_offset, u64 length,
Filipe Manana020d5b72015-11-19 10:57:20 +00003409 u64 dev_offset,
Omar Sandoval32934282018-08-14 11:09:52 -07003410 struct btrfs_block_group_cache *cache)
Arne Jansena2de7332011-03-08 14:14:00 +01003411{
Jeff Mahoneyfb456252016-06-22 18:54:56 -04003412 struct btrfs_fs_info *fs_info = sctx->fs_info;
3413 struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
Arne Jansena2de7332011-03-08 14:14:00 +01003414 struct map_lookup *map;
3415 struct extent_map *em;
3416 int i;
Stefan Behrensff023aa2012-11-06 11:43:11 +01003417 int ret = 0;
Arne Jansena2de7332011-03-08 14:14:00 +01003418
3419 read_lock(&map_tree->map_tree.lock);
3420 em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
3421 read_unlock(&map_tree->map_tree.lock);
3422
Filipe Manana020d5b72015-11-19 10:57:20 +00003423 if (!em) {
3424 /*
3425 * Might have been an unused block group deleted by the cleaner
3426 * kthread or relocation.
3427 */
3428 spin_lock(&cache->lock);
3429 if (!cache->removed)
3430 ret = -EINVAL;
3431 spin_unlock(&cache->lock);
3432
3433 return ret;
3434 }
Arne Jansena2de7332011-03-08 14:14:00 +01003435
Jeff Mahoney95617d62015-06-03 10:55:48 -04003436 map = em->map_lookup;
Arne Jansena2de7332011-03-08 14:14:00 +01003437 if (em->start != chunk_offset)
3438 goto out;
3439
3440 if (em->len < length)
3441 goto out;
3442
3443 for (i = 0; i < map->num_stripes; ++i) {
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01003444 if (map->stripes[i].dev->bdev == scrub_dev->bdev &&
Arne Jansen859acaf2012-02-09 15:09:02 +01003445 map->stripes[i].physical == dev_offset) {
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01003446 ret = scrub_stripe(sctx, map, scrub_dev, i,
Omar Sandoval32934282018-08-14 11:09:52 -07003447 chunk_offset, length);
Arne Jansena2de7332011-03-08 14:14:00 +01003448 if (ret)
3449 goto out;
3450 }
3451 }
3452out:
3453 free_extent_map(em);
3454
3455 return ret;
3456}
3457
3458static noinline_for_stack
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01003459int scrub_enumerate_chunks(struct scrub_ctx *sctx,
Omar Sandoval32934282018-08-14 11:09:52 -07003460 struct btrfs_device *scrub_dev, u64 start, u64 end)
Arne Jansena2de7332011-03-08 14:14:00 +01003461{
3462 struct btrfs_dev_extent *dev_extent = NULL;
3463 struct btrfs_path *path;
Jeff Mahoney0b246af2016-06-22 18:54:23 -04003464 struct btrfs_fs_info *fs_info = sctx->fs_info;
3465 struct btrfs_root *root = fs_info->dev_root;
Arne Jansena2de7332011-03-08 14:14:00 +01003466 u64 length;
Arne Jansena2de7332011-03-08 14:14:00 +01003467 u64 chunk_offset;
Zhaolei55e3a602015-08-05 16:43:30 +08003468 int ret = 0;
Zhaolei76a8efa2015-11-17 18:46:17 +08003469 int ro_set;
Arne Jansena2de7332011-03-08 14:14:00 +01003470 int slot;
3471 struct extent_buffer *l;
3472 struct btrfs_key key;
3473 struct btrfs_key found_key;
3474 struct btrfs_block_group_cache *cache;
Stefan Behrensff023aa2012-11-06 11:43:11 +01003475 struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
Arne Jansena2de7332011-03-08 14:14:00 +01003476
3477 path = btrfs_alloc_path();
3478 if (!path)
3479 return -ENOMEM;
3480
David Sterbae4058b52015-11-27 16:31:35 +01003481 path->reada = READA_FORWARD;
Arne Jansena2de7332011-03-08 14:14:00 +01003482 path->search_commit_root = 1;
3483 path->skip_locking = 1;
3484
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01003485 key.objectid = scrub_dev->devid;
Arne Jansena2de7332011-03-08 14:14:00 +01003486 key.offset = 0ull;
3487 key.type = BTRFS_DEV_EXTENT_KEY;
3488
Arne Jansena2de7332011-03-08 14:14:00 +01003489 while (1) {
3490 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3491 if (ret < 0)
Arne Jansen8c510322011-06-03 10:09:26 +02003492 break;
3493 if (ret > 0) {
3494 if (path->slots[0] >=
3495 btrfs_header_nritems(path->nodes[0])) {
3496 ret = btrfs_next_leaf(root, path);
Zhaolei55e3a602015-08-05 16:43:30 +08003497 if (ret < 0)
Arne Jansen8c510322011-06-03 10:09:26 +02003498 break;
Zhaolei55e3a602015-08-05 16:43:30 +08003499 if (ret > 0) {
3500 ret = 0;
3501 break;
3502 }
3503 } else {
3504 ret = 0;
Arne Jansen8c510322011-06-03 10:09:26 +02003505 }
3506 }
Arne Jansena2de7332011-03-08 14:14:00 +01003507
3508 l = path->nodes[0];
3509 slot = path->slots[0];
3510
3511 btrfs_item_key_to_cpu(l, &found_key, slot);
3512
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01003513 if (found_key.objectid != scrub_dev->devid)
Arne Jansena2de7332011-03-08 14:14:00 +01003514 break;
3515
David Sterba962a2982014-06-04 18:41:45 +02003516 if (found_key.type != BTRFS_DEV_EXTENT_KEY)
Arne Jansena2de7332011-03-08 14:14:00 +01003517 break;
3518
3519 if (found_key.offset >= end)
3520 break;
3521
3522 if (found_key.offset < key.offset)
3523 break;
3524
3525 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
3526 length = btrfs_dev_extent_length(l, dev_extent);
3527
Qu Wenruoced96ed2014-06-19 10:42:51 +08003528 if (found_key.offset + length <= start)
3529 goto skip;
Arne Jansena2de7332011-03-08 14:14:00 +01003530
Arne Jansena2de7332011-03-08 14:14:00 +01003531 chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
3532
3533 /*
3534 * get a reference on the corresponding block group to prevent
3535 * the chunk from going away while we scrub it
3536 */
3537 cache = btrfs_lookup_block_group(fs_info, chunk_offset);
Qu Wenruoced96ed2014-06-19 10:42:51 +08003538
3539 /* some chunks are removed but not committed to disk yet,
3540 * continue scrubbing */
3541 if (!cache)
3542 goto skip;
3543
Zhaolei55e3a602015-08-05 16:43:30 +08003544 /*
3545	 * we need to call btrfs_inc_block_group_ro() with scrubs_paused,
3546 * to avoid deadlock caused by:
3547 * btrfs_inc_block_group_ro()
3548 * -> btrfs_wait_for_commit()
3549 * -> btrfs_commit_transaction()
3550 * -> btrfs_scrub_pause()
3551 */
3552 scrub_pause_on(fs_info);
Nikolay Borisovc83488a2018-06-20 15:49:14 +03003553 ret = btrfs_inc_block_group_ro(cache);
Omar Sandoval32934282018-08-14 11:09:52 -07003554 if (!ret && sctx->is_dev_replace) {
Filipe Mananaf0e9b7d2016-05-14 09:12:53 +01003555 /*
3556	 * If we are doing a device replace, wait for any tasks
Andrea Gelmini52042d82018-11-28 12:05:13 +01003557 * that started delalloc right before we set the block
Filipe Mananaf0e9b7d2016-05-14 09:12:53 +01003558 * group to RO mode, as they might have just allocated
3559 * an extent from it or decided they could do a nocow
3560 * write. And if any such tasks did that, wait for their
3561 * ordered extents to complete and then commit the
3562 * current transaction, so that we can later see the new
3563 * extent items in the extent tree - the ordered extents
3564 * create delayed data references (for cow writes) when
3565 * they complete, which will be run and insert the
3566 * corresponding extent items into the extent tree when
3567 * we commit the transaction they used when running
3568 * inode.c:btrfs_finish_ordered_io(). We later use
3569 * the commit root of the extent tree to find extents
3570 * to copy from the srcdev into the tgtdev, and we don't
3571 * want to miss any new extents.
3572 */
3573 btrfs_wait_block_group_reservations(cache);
3574 btrfs_wait_nocow_writers(cache);
Chris Mason6374e57a2017-06-23 09:48:21 -07003575 ret = btrfs_wait_ordered_roots(fs_info, U64_MAX,
Filipe Mananaf0e9b7d2016-05-14 09:12:53 +01003576 cache->key.objectid,
3577 cache->key.offset);
3578 if (ret > 0) {
3579 struct btrfs_trans_handle *trans;
3580
3581 trans = btrfs_join_transaction(root);
3582 if (IS_ERR(trans))
3583 ret = PTR_ERR(trans);
3584 else
Jeff Mahoney3a45bb22016-09-09 21:39:03 -04003585 ret = btrfs_commit_transaction(trans);
Filipe Mananaf0e9b7d2016-05-14 09:12:53 +01003586 if (ret) {
3587 scrub_pause_off(fs_info);
3588 btrfs_put_block_group(cache);
3589 break;
3590 }
3591 }
3592 }
Zhaolei55e3a602015-08-05 16:43:30 +08003593 scrub_pause_off(fs_info);
Zhaolei76a8efa2015-11-17 18:46:17 +08003594
3595 if (ret == 0) {
3596 ro_set = 1;
3597 } else if (ret == -ENOSPC) {
3598 /*
3599 * btrfs_inc_block_group_ro return -ENOSPC when it
3600 * failed in creating new chunk for metadata.
3601 * It is not a problem for scrub/replace, because
3602 * metadata are always cowed, and our scrub paused
3603 * commit_transactions.
3604 */
3605 ro_set = 0;
3606 } else {
Jeff Mahoney5d163e02016-09-20 10:05:00 -04003607 btrfs_warn(fs_info,
David Sterba913e1532017-07-13 15:32:18 +02003608 "failed setting block group ro: %d", ret);
Zhaolei55e3a602015-08-05 16:43:30 +08003609 btrfs_put_block_group(cache);
3610 break;
3611 }
3612
David Sterbacb5583d2018-09-07 16:11:23 +02003613 down_write(&fs_info->dev_replace.rwsem);
Stefan Behrensff023aa2012-11-06 11:43:11 +01003614 dev_replace->cursor_right = found_key.offset + length;
3615 dev_replace->cursor_left = found_key.offset;
3616 dev_replace->item_needs_writeback = 1;
David Sterbacb5583d2018-09-07 16:11:23 +02003617 up_write(&dev_replace->rwsem);
3618
Zhao Lei8c204c92015-08-19 15:02:40 +08003619 ret = scrub_chunk(sctx, scrub_dev, chunk_offset, length,
Omar Sandoval32934282018-08-14 11:09:52 -07003620 found_key.offset, cache);
Stefan Behrensff023aa2012-11-06 11:43:11 +01003621
3622 /*
3623	 * Flush and submit all pending read and write bios, then
3624	 * wait for them.
3625 * Note that in the dev replace case, a read request causes
3626 * write requests that are submitted in the read completion
3627 * worker. Therefore in the current situation, it is required
3628 * that all write requests are flushed, so that all read and
3629 * write requests are really completed when bios_in_flight
3630 * changes to 0.
3631 */
David Sterba2073c4c2017-03-31 17:12:51 +02003632 sctx->flush_all_writes = true;
Stefan Behrensff023aa2012-11-06 11:43:11 +01003633 scrub_submit(sctx);
David Sterba3fb99302017-05-16 19:10:32 +02003634 mutex_lock(&sctx->wr_lock);
Stefan Behrensff023aa2012-11-06 11:43:11 +01003635 scrub_wr_submit(sctx);
David Sterba3fb99302017-05-16 19:10:32 +02003636 mutex_unlock(&sctx->wr_lock);
Stefan Behrensff023aa2012-11-06 11:43:11 +01003637
3638 wait_event(sctx->list_wait,
3639 atomic_read(&sctx->bios_in_flight) == 0);
Zhaoleib708ce92015-08-05 16:43:29 +08003640
3641 scrub_pause_on(fs_info);
Wang Shilong12cf9372014-02-19 19:24:17 +08003642
3643 /*
3644	 * Must be called before we decrease @scrub_paused.
3645	 * Make sure we don't block transaction commit while
3646	 * we are waiting for pending workers to finish.
3647 */
Stefan Behrensff023aa2012-11-06 11:43:11 +01003648 wait_event(sctx->list_wait,
3649 atomic_read(&sctx->workers_pending) == 0);
David Sterba2073c4c2017-03-31 17:12:51 +02003650 sctx->flush_all_writes = false;
Wang Shilong12cf9372014-02-19 19:24:17 +08003651
Zhaoleib708ce92015-08-05 16:43:29 +08003652 scrub_pause_off(fs_info);
Stefan Behrensff023aa2012-11-06 11:43:11 +01003653
David Sterbacb5583d2018-09-07 16:11:23 +02003654 down_write(&fs_info->dev_replace.rwsem);
Filipe Manana1a1a8b72016-05-14 19:44:40 +01003655 dev_replace->cursor_left = dev_replace->cursor_right;
3656 dev_replace->item_needs_writeback = 1;
David Sterbacb5583d2018-09-07 16:11:23 +02003657 up_write(&fs_info->dev_replace.rwsem);
Filipe Manana1a1a8b72016-05-14 19:44:40 +01003658
Zhaolei76a8efa2015-11-17 18:46:17 +08003659 if (ro_set)
Jeff Mahoney2ff7e612016-06-22 18:54:24 -04003660 btrfs_dec_block_group_ro(cache);
Stefan Behrensff023aa2012-11-06 11:43:11 +01003661
Filipe Manana758f2df2015-11-19 11:45:48 +00003662 /*
3663 * We might have prevented the cleaner kthread from deleting
3664 * this block group if it was already unused because we raced
3665 * and set it to RO mode first. So add it back to the unused
3666 * list, otherwise it might not ever be deleted unless a manual
3667 * balance is triggered or it becomes used and unused again.
3668 */
3669 spin_lock(&cache->lock);
3670 if (!cache->removed && !cache->ro && cache->reserved == 0 &&
3671 btrfs_block_group_used(&cache->item) == 0) {
3672 spin_unlock(&cache->lock);
Qu Wenruo031f24d2018-05-22 16:43:47 +08003673 btrfs_mark_bg_unused(cache);
Filipe Manana758f2df2015-11-19 11:45:48 +00003674 } else {
3675 spin_unlock(&cache->lock);
3676 }
3677
Arne Jansena2de7332011-03-08 14:14:00 +01003678 btrfs_put_block_group(cache);
3679 if (ret)
3680 break;
Omar Sandoval32934282018-08-14 11:09:52 -07003681 if (sctx->is_dev_replace &&
Stefan Behrensaf1be4f2012-11-27 17:39:51 +00003682 atomic64_read(&dev_replace->num_write_errors) > 0) {
Stefan Behrensff023aa2012-11-06 11:43:11 +01003683 ret = -EIO;
3684 break;
3685 }
3686 if (sctx->stat.malloc_errors > 0) {
3687 ret = -ENOMEM;
3688 break;
3689 }
Qu Wenruoced96ed2014-06-19 10:42:51 +08003690skip:
Arne Jansena2de7332011-03-08 14:14:00 +01003691 key.offset = found_key.offset + length;
Chris Mason71267332011-05-23 06:30:52 -04003692 btrfs_release_path(path);
Arne Jansena2de7332011-03-08 14:14:00 +01003693 }
3694
Arne Jansena2de7332011-03-08 14:14:00 +01003695 btrfs_free_path(path);
Arne Jansen8c510322011-06-03 10:09:26 +02003696
Zhaolei55e3a602015-08-05 16:43:30 +08003697 return ret;
Arne Jansena2de7332011-03-08 14:14:00 +01003698}
3699
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01003700static noinline_for_stack int scrub_supers(struct scrub_ctx *sctx,
3701 struct btrfs_device *scrub_dev)
Arne Jansena2de7332011-03-08 14:14:00 +01003702{
3703 int i;
3704 u64 bytenr;
3705 u64 gen;
3706 int ret;
Jeff Mahoney0b246af2016-06-22 18:54:23 -04003707 struct btrfs_fs_info *fs_info = sctx->fs_info;
Arne Jansena2de7332011-03-08 14:14:00 +01003708
Jeff Mahoney0b246af2016-06-22 18:54:23 -04003709 if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
Jeff Mahoney79787ea2012-03-12 16:03:00 +01003710 return -EIO;
3711
Miao Xie5f546062014-07-24 11:37:09 +08003712	 /* Seed devices of a new filesystem have their own generation. */
Jeff Mahoney0b246af2016-06-22 18:54:23 -04003713 if (scrub_dev->fs_devices != fs_info->fs_devices)
Miao Xie5f546062014-07-24 11:37:09 +08003714 gen = scrub_dev->generation;
3715 else
Jeff Mahoney0b246af2016-06-22 18:54:23 -04003716 gen = fs_info->last_trans_committed;
Arne Jansena2de7332011-03-08 14:14:00 +01003717
3718 for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
3719 bytenr = btrfs_sb_offset(i);
Miao Xie935e5cc2014-09-03 21:35:33 +08003720 if (bytenr + BTRFS_SUPER_INFO_SIZE >
3721 scrub_dev->commit_total_bytes)
Arne Jansena2de7332011-03-08 14:14:00 +01003722 break;
3723
Stefan Behrensd9d181c2012-11-02 09:58:09 +01003724 ret = scrub_pages(sctx, bytenr, BTRFS_SUPER_INFO_SIZE, bytenr,
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01003725 scrub_dev, BTRFS_EXTENT_FLAG_SUPER, gen, i,
Stefan Behrensff023aa2012-11-06 11:43:11 +01003726 NULL, 1, bytenr);
Arne Jansena2de7332011-03-08 14:14:00 +01003727 if (ret)
3728 return ret;
3729 }
Stefan Behrensb6bfebc2012-11-02 16:44:58 +01003730 wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);
Arne Jansena2de7332011-03-08 14:14:00 +01003731
3732 return 0;
3733}
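/*
 * For reference (well-known btrfs constants, stated here for clarity):
 * btrfs_sb_offset() returns the fixed super block copy locations 64KiB,
 * 64MiB and 256GiB for i = 0..BTRFS_SUPER_MIRROR_MAX - 1; copies that
 * would fall beyond commit_total_bytes are skipped by the loop above,
 * so small devices scrub fewer mirrors.
 */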
3734
3735/*
3736 * get a reference count on fs_info->scrub_workers; start workers if necessary
3737 */
Stefan Behrensff023aa2012-11-06 11:43:11 +01003738static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info,
3739 int is_dev_replace)
Arne Jansena2de7332011-03-08 14:14:00 +01003740{
David Sterba6f011052015-02-16 18:34:01 +01003741 unsigned int flags = WQ_FREEZABLE | WQ_UNBOUND;
Qu Wenruo0339ef22014-02-28 10:46:17 +08003742 int max_active = fs_info->thread_pool_size;
Arne Jansena2de7332011-03-08 14:14:00 +01003743
Anand Jaineb4318e2019-01-30 14:45:01 +08003744 lockdep_assert_held(&fs_info->scrub_lock);
3745
Anand Jainff09c4c2019-01-30 14:45:02 +08003746 if (refcount_read(&fs_info->scrub_workers_refcnt) == 0) {
David Sterbac8352942019-02-12 16:51:18 +01003747 ASSERT(fs_info->scrub_workers == NULL);
David Sterbaaf1cbe02017-03-31 18:42:57 +02003748 fs_info->scrub_workers = btrfs_alloc_workqueue(fs_info, "scrub",
3749 flags, is_dev_replace ? 1 : max_active, 4);
Zhao Leie82afc52015-06-12 20:36:58 +08003750 if (!fs_info->scrub_workers)
3751 goto fail_scrub_workers;
3752
David Sterbac8352942019-02-12 16:51:18 +01003753 ASSERT(fs_info->scrub_wr_completion_workers == NULL);
Qu Wenruo0339ef22014-02-28 10:46:17 +08003754 fs_info->scrub_wr_completion_workers =
Jeff Mahoneycb001092016-06-09 16:22:11 -04003755 btrfs_alloc_workqueue(fs_info, "scrubwrc", flags,
Qu Wenruo0339ef22014-02-28 10:46:17 +08003756 max_active, 2);
Zhao Leie82afc52015-06-12 20:36:58 +08003757 if (!fs_info->scrub_wr_completion_workers)
3758 goto fail_scrub_wr_completion_workers;
3759
David Sterbac8352942019-02-12 16:51:18 +01003760 ASSERT(fs_info->scrub_parity_workers == NULL);
Zhao Lei20b2e302015-06-04 20:09:15 +08003761 fs_info->scrub_parity_workers =
Jeff Mahoneycb001092016-06-09 16:22:11 -04003762 btrfs_alloc_workqueue(fs_info, "scrubparity", flags,
Zhao Lei20b2e302015-06-04 20:09:15 +08003763 max_active, 2);
Zhao Leie82afc52015-06-12 20:36:58 +08003764 if (!fs_info->scrub_parity_workers)
3765 goto fail_scrub_parity_workers;
Anand Jainff09c4c2019-01-30 14:45:02 +08003766
3767 refcount_set(&fs_info->scrub_workers_refcnt, 1);
3768 } else {
3769 refcount_inc(&fs_info->scrub_workers_refcnt);
Arne Jansen632dd772011-06-10 12:07:07 +02003770 }
Zhao Leie82afc52015-06-12 20:36:58 +08003771 return 0;
3772
3773fail_scrub_parity_workers:
Zhao Leie82afc52015-06-12 20:36:58 +08003774 btrfs_destroy_workqueue(fs_info->scrub_wr_completion_workers);
3775fail_scrub_wr_completion_workers:
3776 btrfs_destroy_workqueue(fs_info->scrub_workers);
3777fail_scrub_workers:
3778 return -ENOMEM;
Arne Jansena2de7332011-03-08 14:14:00 +01003779}
3780
Stefan Behrensaa1b8cd2012-11-05 17:03:39 +01003781int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
3782 u64 end, struct btrfs_scrub_progress *progress,
Stefan Behrens63a212a2012-11-05 18:29:28 +01003783 int readonly, int is_dev_replace)
Arne Jansena2de7332011-03-08 14:14:00 +01003784{
Stefan Behrensd9d181c2012-11-02 09:58:09 +01003785 struct scrub_ctx *sctx;
Arne Jansena2de7332011-03-08 14:14:00 +01003786 int ret;
3787 struct btrfs_device *dev;
Filipe Mananaa5fb1142018-11-26 20:07:17 +00003788 unsigned int nofs_flag;
Anand Jain1cec3f272019-01-30 14:45:00 +08003789 struct btrfs_workqueue *scrub_workers = NULL;
3790 struct btrfs_workqueue *scrub_wr_comp = NULL;
3791 struct btrfs_workqueue *scrub_parity = NULL;
Arne Jansena2de7332011-03-08 14:14:00 +01003792
Stefan Behrensaa1b8cd2012-11-05 17:03:39 +01003793 if (btrfs_fs_closing(fs_info))
David Sterba6c3abed2019-02-25 19:57:41 +01003794 return -EAGAIN;
Arne Jansena2de7332011-03-08 14:14:00 +01003795
Jeff Mahoneyda170662016-06-15 09:22:56 -04003796 if (fs_info->nodesize > BTRFS_STRIPE_LEN) {
Stefan Behrensb5d67f62012-03-27 14:21:27 -04003797 /*
3798	 * Scrub, as implemented, cannot calculate checksums for nodes
3799	 * larger than BTRFS_STRIPE_LEN. Do not handle this situation
3800	 * at all because it won't ever happen.
3801 */
Frank Holtonefe120a2013-12-20 11:37:06 -05003802 btrfs_err(fs_info,
3803 "scrub: size assumption nodesize <= BTRFS_STRIPE_LEN (%d <= %d) fails",
Jeff Mahoneyda170662016-06-15 09:22:56 -04003804 fs_info->nodesize,
3805 BTRFS_STRIPE_LEN);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04003806 return -EINVAL;
3807 }
3808
Jeff Mahoneyda170662016-06-15 09:22:56 -04003809 if (fs_info->sectorsize != PAGE_SIZE) {
Stefan Behrensb5d67f62012-03-27 14:21:27 -04003810 /* not supported for data w/o checksums */
Chandan Rajendra751bebbe2016-07-04 10:04:39 +05303811 btrfs_err_rl(fs_info,
Jeff Mahoney5d163e02016-09-20 10:05:00 -04003812 "scrub: size assumption sectorsize != PAGE_SIZE (%d != %lu) fails",
Jeff Mahoneyda170662016-06-15 09:22:56 -04003813 fs_info->sectorsize, PAGE_SIZE);
Arne Jansena2de7332011-03-08 14:14:00 +01003814 return -EINVAL;
3815 }
3816
Jeff Mahoneyda170662016-06-15 09:22:56 -04003817 if (fs_info->nodesize >
Stefan Behrens7a9e9982012-11-02 14:58:04 +01003818 PAGE_SIZE * SCRUB_MAX_PAGES_PER_BLOCK ||
Jeff Mahoneyda170662016-06-15 09:22:56 -04003819 fs_info->sectorsize > PAGE_SIZE * SCRUB_MAX_PAGES_PER_BLOCK) {
Stefan Behrens7a9e9982012-11-02 14:58:04 +01003820 /*
3821	 * This would exceed the bounds of the pagev array in
3822	 * struct scrub_block.
3823 */
Jeff Mahoney5d163e02016-09-20 10:05:00 -04003824 btrfs_err(fs_info,
3825 "scrub: size assumption nodesize and sectorsize <= SCRUB_MAX_PAGES_PER_BLOCK (%d <= %d && %d <= %d) fails",
Jeff Mahoneyda170662016-06-15 09:22:56 -04003826 fs_info->nodesize,
Stefan Behrens7a9e9982012-11-02 14:58:04 +01003827 SCRUB_MAX_PAGES_PER_BLOCK,
Jeff Mahoneyda170662016-06-15 09:22:56 -04003828 fs_info->sectorsize,
Stefan Behrens7a9e9982012-11-02 14:58:04 +01003829 SCRUB_MAX_PAGES_PER_BLOCK);
3830 return -EINVAL;
3831 }
3832
David Sterba0e94c4f42018-12-04 16:11:56 +01003833 /* Allocate outside of device_list_mutex */
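	/*
	 * (scrub_setup_ctx() allocates with GFP_KERNEL, which could
	 * recurse into reclaim; doing that under the mutex could block
	 * other device list users for a long time.)
	 */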
3834 sctx = scrub_setup_ctx(fs_info, is_dev_replace);
3835 if (IS_ERR(sctx))
3836 return PTR_ERR(sctx);
Arne Jansena2de7332011-03-08 14:14:00 +01003837
Stefan Behrensaa1b8cd2012-11-05 17:03:39 +01003838 mutex_lock(&fs_info->fs_devices->device_list_mutex);
Anand Jain09ba3bc2019-01-19 14:48:55 +08003839 dev = btrfs_find_device(fs_info->fs_devices, devid, NULL, NULL, true);
Anand Jaine6e674b2017-12-04 12:54:54 +08003840 if (!dev || (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state) &&
3841 !is_dev_replace)) {
Stefan Behrensaa1b8cd2012-11-05 17:03:39 +01003842 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
David Sterba0e94c4f42018-12-04 16:11:56 +01003843 ret = -ENODEV;
3844 goto out_free_ctx;
Arne Jansena2de7332011-03-08 14:14:00 +01003845 }
Arne Jansena2de7332011-03-08 14:14:00 +01003846
Anand Jainebbede42017-12-04 12:54:52 +08003847 if (!is_dev_replace && !readonly &&
3848 !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state)) {
Miao Xie5d68da32014-07-24 11:37:07 +08003849 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
Misono Tomohiro672d5992018-08-02 16:19:07 +09003850 btrfs_err_in_rcu(fs_info, "scrub: device %s is not writable",
3851 rcu_str_deref(dev->name));
David Sterba0e94c4f42018-12-04 16:11:56 +01003852 ret = -EROFS;
3853 goto out_free_ctx;
Miao Xie5d68da32014-07-24 11:37:07 +08003854 }
3855
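	/*
	 * Lock order in this function: device_list_mutex, then
	 * scrub_lock, then the dev_replace rwsem (read side).
	 */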
Wang Shilong3b7a0162013-10-12 02:11:12 +08003856 mutex_lock(&fs_info->scrub_lock);
Anand Jaine12c9622017-12-04 12:54:53 +08003857 if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) ||
Anand Jain401e29c2017-12-04 12:54:55 +08003858 test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &dev->dev_state)) {
Arne Jansena2de7332011-03-08 14:14:00 +01003859 mutex_unlock(&fs_info->scrub_lock);
Stefan Behrensaa1b8cd2012-11-05 17:03:39 +01003860 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
David Sterba0e94c4f42018-12-04 16:11:56 +01003861 ret = -EIO;
3862 goto out_free_ctx;
Arne Jansena2de7332011-03-08 14:14:00 +01003863 }
3864
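	/*
	 * A device runs at most one scrub at a time, and a plain scrub
	 * must not start while a dev-replace is ongoing; take the
	 * replace rwsem to check both without racing.
	 */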
David Sterbacb5583d2018-09-07 16:11:23 +02003865 down_read(&fs_info->dev_replace.rwsem);
Anand Jaincadbc0a2018-01-03 16:08:30 +08003866 if (dev->scrub_ctx ||
Stefan Behrens8dabb742012-11-06 13:15:27 +01003867 (!is_dev_replace &&
3868 btrfs_dev_replace_is_ongoing(&fs_info->dev_replace))) {
David Sterbacb5583d2018-09-07 16:11:23 +02003869 up_read(&fs_info->dev_replace.rwsem);
Arne Jansena2de7332011-03-08 14:14:00 +01003870 mutex_unlock(&fs_info->scrub_lock);
Stefan Behrensaa1b8cd2012-11-05 17:03:39 +01003871 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
David Sterba0e94c4f42018-12-04 16:11:56 +01003872 ret = -EINPROGRESS;
3873 goto out_free_ctx;
Arne Jansena2de7332011-03-08 14:14:00 +01003874 }
David Sterbacb5583d2018-09-07 16:11:23 +02003875 up_read(&fs_info->dev_replace.rwsem);
Wang Shilong3b7a0162013-10-12 02:11:12 +08003876
3877 ret = scrub_workers_get(fs_info, is_dev_replace);
3878 if (ret) {
3879 mutex_unlock(&fs_info->scrub_lock);
3880 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
David Sterba0e94c4f42018-12-04 16:11:56 +01003881 goto out_free_ctx;
Wang Shilong3b7a0162013-10-12 02:11:12 +08003882 }
3883
Stefan Behrensd9d181c2012-11-02 09:58:09 +01003884 sctx->readonly = readonly;
Anand Jaincadbc0a2018-01-03 16:08:30 +08003885 dev->scrub_ctx = sctx;
Wang Shilong3cb09292013-12-04 21:15:19 +08003886 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
Arne Jansena2de7332011-03-08 14:14:00 +01003887
Wang Shilong3cb09292013-12-04 21:15:19 +08003888 /*
3889	 * By checking @scrub_pause_req here, we avoid a race between
3890	 * transaction commit and scrubbing.
3891 */
Wang Shilongcb7ab022013-12-04 21:16:53 +08003892 __scrub_blocked_if_needed(fs_info);
Arne Jansena2de7332011-03-08 14:14:00 +01003893 atomic_inc(&fs_info->scrubs_running);
3894 mutex_unlock(&fs_info->scrub_lock);
Arne Jansena2de7332011-03-08 14:14:00 +01003895
Filipe Mananaa5fb1142018-11-26 20:07:17 +00003896 /*
3897 * In order to avoid deadlock with reclaim when there is a transaction
3898 * trying to pause scrub, make sure we use GFP_NOFS for all the
3899	 * allocations done at scrub_pages() and scrub_pages_for_parity()
3900 * invoked by our callees. The pausing request is done when the
3901 * transaction commit starts, and it blocks the transaction until scrub
3902 * is paused (done at specific points at scrub_stripe() or right above
3903 * before incrementing fs_info->scrubs_running).
3904 */
3905 nofs_flag = memalloc_nofs_save();
Stefan Behrensff023aa2012-11-06 11:43:11 +01003906 if (!is_dev_replace) {
Anand Jaind1e14422019-01-03 16:17:40 +08003907 btrfs_info(fs_info, "scrub: started on devid %llu", devid);
Wang Shilong9b011ad2013-10-25 19:12:02 +08003908 /*
3909	 * Hold the device list mutex to serialize against super block
3910	 * writes, such as those kicked off by a log tree sync.
3911 */
Wang Shilong3cb09292013-12-04 21:15:19 +08003912 mutex_lock(&fs_info->fs_devices->device_list_mutex);
Stefan Behrensff023aa2012-11-06 11:43:11 +01003913 ret = scrub_supers(sctx, dev);
Wang Shilong3cb09292013-12-04 21:15:19 +08003914 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
Stefan Behrensff023aa2012-11-06 11:43:11 +01003915 }
Arne Jansena2de7332011-03-08 14:14:00 +01003916
3917 if (!ret)
Omar Sandoval32934282018-08-14 11:09:52 -07003918 ret = scrub_enumerate_chunks(sctx, dev, start, end);
Filipe Mananaa5fb1142018-11-26 20:07:17 +00003919 memalloc_nofs_restore(nofs_flag);
Arne Jansena2de7332011-03-08 14:14:00 +01003920
Stefan Behrensb6bfebc2012-11-02 16:44:58 +01003921 wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);
Arne Jansena2de7332011-03-08 14:14:00 +01003922 atomic_dec(&fs_info->scrubs_running);
3923 wake_up(&fs_info->scrub_pause_wait);
3924
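	/*
	 * Bios were drained above; also wait for any still-pending
	 * worker items (e.g. deferred fixup work) so that sctx->stat
	 * is final before it is copied out below.
	 */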
Stefan Behrensb6bfebc2012-11-02 16:44:58 +01003925 wait_event(sctx->list_wait, atomic_read(&sctx->workers_pending) == 0);
Jan Schmidt0ef8e452011-06-13 20:04:15 +02003926
Arne Jansena2de7332011-03-08 14:14:00 +01003927 if (progress)
Stefan Behrensd9d181c2012-11-02 09:58:09 +01003928 memcpy(progress, &sctx->stat, sizeof(*progress));
Arne Jansena2de7332011-03-08 14:14:00 +01003929
Anand Jaind1e14422019-01-03 16:17:40 +08003930 if (!is_dev_replace)
3931 btrfs_info(fs_info, "scrub: %s on devid %llu with status: %d",
3932 ret ? "not finished" : "finished", devid, ret);
3933
Arne Jansena2de7332011-03-08 14:14:00 +01003934 mutex_lock(&fs_info->scrub_lock);
Anand Jaincadbc0a2018-01-03 16:08:30 +08003935 dev->scrub_ctx = NULL;
Anand Jainff09c4c2019-01-30 14:45:02 +08003936 if (refcount_dec_and_test(&fs_info->scrub_workers_refcnt)) {
Anand Jain1cec3f272019-01-30 14:45:00 +08003937 scrub_workers = fs_info->scrub_workers;
3938 scrub_wr_comp = fs_info->scrub_wr_completion_workers;
3939 scrub_parity = fs_info->scrub_parity_workers;
David Sterbac8352942019-02-12 16:51:18 +01003940
3941 fs_info->scrub_workers = NULL;
3942 fs_info->scrub_wr_completion_workers = NULL;
3943 fs_info->scrub_parity_workers = NULL;
Anand Jain1cec3f272019-01-30 14:45:00 +08003944 }
Arne Jansena2de7332011-03-08 14:14:00 +01003945 mutex_unlock(&fs_info->scrub_lock);
3946
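	/*
	 * Destroy the workqueues only after dropping scrub_lock;
	 * flushing them under the lock could deadlock against workers
	 * that take scrub_lock themselves.
	 */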
Anand Jain1cec3f272019-01-30 14:45:00 +08003947 btrfs_destroy_workqueue(scrub_workers);
3948 btrfs_destroy_workqueue(scrub_wr_comp);
3949 btrfs_destroy_workqueue(scrub_parity);
Filipe Mananaf55985f2015-02-09 21:14:24 +00003950 scrub_put_ctx(sctx);
Arne Jansena2de7332011-03-08 14:14:00 +01003951
3952 return ret;
David Sterba0e94c4f42018-12-04 16:11:56 +01003953
3954out_free_ctx:
3955 scrub_free_ctx(sctx);
3956
3957 return ret;
Arne Jansena2de7332011-03-08 14:14:00 +01003958}
3959
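/*
 * Ask every running scrub to pause and wait until each has parked at a
 * pause point; used around operations like transaction commit that must
 * not run concurrently with scrub I/O.
 */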
Jeff Mahoney2ff7e612016-06-22 18:54:24 -04003960void btrfs_scrub_pause(struct btrfs_fs_info *fs_info)
Arne Jansena2de7332011-03-08 14:14:00 +01003961{
Arne Jansena2de7332011-03-08 14:14:00 +01003962 mutex_lock(&fs_info->scrub_lock);
3963 atomic_inc(&fs_info->scrub_pause_req);
3964 while (atomic_read(&fs_info->scrubs_paused) !=
3965 atomic_read(&fs_info->scrubs_running)) {
3966 mutex_unlock(&fs_info->scrub_lock);
3967 wait_event(fs_info->scrub_pause_wait,
3968 atomic_read(&fs_info->scrubs_paused) ==
3969 atomic_read(&fs_info->scrubs_running));
3970 mutex_lock(&fs_info->scrub_lock);
3971 }
3972 mutex_unlock(&fs_info->scrub_lock);
Arne Jansena2de7332011-03-08 14:14:00 +01003973}
3974
Jeff Mahoney2ff7e612016-06-22 18:54:24 -04003975void btrfs_scrub_continue(struct btrfs_fs_info *fs_info)
Arne Jansena2de7332011-03-08 14:14:00 +01003976{
Arne Jansena2de7332011-03-08 14:14:00 +01003977 atomic_dec(&fs_info->scrub_pause_req);
3978 wake_up(&fs_info->scrub_pause_wait);
Arne Jansena2de7332011-03-08 14:14:00 +01003979}
3980
Stefan Behrensaa1b8cd2012-11-05 17:03:39 +01003981int btrfs_scrub_cancel(struct btrfs_fs_info *fs_info)
Arne Jansena2de7332011-03-08 14:14:00 +01003982{
Arne Jansena2de7332011-03-08 14:14:00 +01003983 mutex_lock(&fs_info->scrub_lock);
3984 if (!atomic_read(&fs_info->scrubs_running)) {
3985 mutex_unlock(&fs_info->scrub_lock);
3986 return -ENOTCONN;
3987 }
3988
3989 atomic_inc(&fs_info->scrub_cancel_req);
3990 while (atomic_read(&fs_info->scrubs_running)) {
3991 mutex_unlock(&fs_info->scrub_lock);
3992 wait_event(fs_info->scrub_pause_wait,
3993 atomic_read(&fs_info->scrubs_running) == 0);
3994 mutex_lock(&fs_info->scrub_lock);
3995 }
3996 atomic_dec(&fs_info->scrub_cancel_req);
3997 mutex_unlock(&fs_info->scrub_lock);
3998
3999 return 0;
4000}
4001
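/*
 * Cancel the scrub running on @dev, if any, and wait until it has fully
 * torn down before returning.
 */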
David Sterba163e97e2019-03-20 16:32:55 +01004002int btrfs_scrub_cancel_dev(struct btrfs_device *dev)
Jeff Mahoney49b25e02012-03-01 17:24:58 +01004003{
David Sterba163e97e2019-03-20 16:32:55 +01004004 struct btrfs_fs_info *fs_info = dev->fs_info;
Stefan Behrensd9d181c2012-11-02 09:58:09 +01004005 struct scrub_ctx *sctx;
Arne Jansena2de7332011-03-08 14:14:00 +01004006
4007 mutex_lock(&fs_info->scrub_lock);
Anand Jaincadbc0a2018-01-03 16:08:30 +08004008 sctx = dev->scrub_ctx;
Stefan Behrensd9d181c2012-11-02 09:58:09 +01004009 if (!sctx) {
Arne Jansena2de7332011-03-08 14:14:00 +01004010 mutex_unlock(&fs_info->scrub_lock);
4011 return -ENOTCONN;
4012 }
Stefan Behrensd9d181c2012-11-02 09:58:09 +01004013 atomic_inc(&sctx->cancel_req);
Anand Jaincadbc0a2018-01-03 16:08:30 +08004014 while (dev->scrub_ctx) {
Arne Jansena2de7332011-03-08 14:14:00 +01004015 mutex_unlock(&fs_info->scrub_lock);
4016 wait_event(fs_info->scrub_pause_wait,
Anand Jaincadbc0a2018-01-03 16:08:30 +08004017 dev->scrub_ctx == NULL);
Arne Jansena2de7332011-03-08 14:14:00 +01004018 mutex_lock(&fs_info->scrub_lock);
4019 }
4020 mutex_unlock(&fs_info->scrub_lock);
4021
4022 return 0;
4023}
Stefan Behrens1623ede2012-03-27 14:21:26 -04004024
Jeff Mahoney2ff7e612016-06-22 18:54:24 -04004025int btrfs_scrub_progress(struct btrfs_fs_info *fs_info, u64 devid,
Arne Jansena2de7332011-03-08 14:14:00 +01004026 struct btrfs_scrub_progress *progress)
4027{
4028 struct btrfs_device *dev;
Stefan Behrensd9d181c2012-11-02 09:58:09 +01004029 struct scrub_ctx *sctx = NULL;
Arne Jansena2de7332011-03-08 14:14:00 +01004030
Jeff Mahoney0b246af2016-06-22 18:54:23 -04004031 mutex_lock(&fs_info->fs_devices->device_list_mutex);
Anand Jain09ba3bc2019-01-19 14:48:55 +08004032 dev = btrfs_find_device(fs_info->fs_devices, devid, NULL, NULL, true);
Arne Jansena2de7332011-03-08 14:14:00 +01004033 if (dev)
Anand Jaincadbc0a2018-01-03 16:08:30 +08004034 sctx = dev->scrub_ctx;
Stefan Behrensd9d181c2012-11-02 09:58:09 +01004035 if (sctx)
4036 memcpy(progress, &sctx->stat, sizeof(*progress));
Jeff Mahoney0b246af2016-06-22 18:54:23 -04004037 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
Arne Jansena2de7332011-03-08 14:14:00 +01004038
Stefan Behrensd9d181c2012-11-02 09:58:09 +01004039 return dev ? (sctx ? 0 : -ENOTCONN) : -ENODEV;
Arne Jansena2de7332011-03-08 14:14:00 +01004040}
Stefan Behrensff023aa2012-11-06 11:43:11 +01004041
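/*
 * Map @extent_logical to the physical offset, device and mirror number
 * of the first stripe returned by btrfs_map_block(), so that dev-replace
 * can read the extent from a live mirror.
 */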
4042static void scrub_remap_extent(struct btrfs_fs_info *fs_info,
4043 u64 extent_logical, u64 extent_len,
4044 u64 *extent_physical,
4045 struct btrfs_device **extent_dev,
4046 int *extent_mirror_num)
4047{
4048 u64 mapped_length;
4049 struct btrfs_bio *bbio = NULL;
4050 int ret;
4051
4052 mapped_length = extent_len;
Christoph Hellwigcf8cddd2016-10-27 09:27:36 +02004053 ret = btrfs_map_block(fs_info, BTRFS_MAP_READ, extent_logical,
Stefan Behrensff023aa2012-11-06 11:43:11 +01004054 &mapped_length, &bbio, 0);
4055 if (ret || !bbio || mapped_length < extent_len ||
4056 !bbio->stripes[0].dev->bdev) {
Zhao Lei6e9606d2015-01-20 15:11:34 +08004057 btrfs_put_bbio(bbio);
Stefan Behrensff023aa2012-11-06 11:43:11 +01004058 return;
4059 }
4060
4061 *extent_physical = bbio->stripes[0].physical;
4062 *extent_mirror_num = bbio->mirror_num;
4063 *extent_dev = bbio->stripes[0].dev;
Zhao Lei6e9606d2015-01-20 15:11:34 +08004064 btrfs_put_bbio(bbio);
Stefan Behrensff023aa2012-11-06 11:43:11 +01004065}