/*
 * Copyright (C) 2011, 2012 STRATO.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/blkdev.h>
#include <linux/ratelimit.h>
#include "ctree.h"
#include "volumes.h"
#include "disk-io.h"
#include "ordered-data.h"
#include "transaction.h"
#include "backref.h"
#include "extent_io.h"
#include "dev-replace.h"
#include "check-integrity.h"
#include "rcu-string.h"
#include "raid56.h"

/*
 * This is only the first step towards a full-featured scrub. It reads all
 * extents and super blocks and verifies the checksums. In case a bad checksum
 * is found or the extent cannot be read, good data will be written back if
 * any can be found.
 *
 * Future enhancements:
 *  - In case an unrepairable extent is encountered, track which files are
 *    affected and report them
 *  - track and record media errors, throw out bad devices
 *  - add a mode to also read unallocated space
 */

struct scrub_block;
struct scrub_ctx;

/*
 * the following three values only influence the performance.
 * The last one configures the number of parallel and outstanding I/O
 * operations. The first two values configure an upper limit for the number
 * of (dynamically allocated) pages that are added to a bio.
 */
#define SCRUB_PAGES_PER_RD_BIO	32	/* 128k per bio */
#define SCRUB_PAGES_PER_WR_BIO	32	/* 128k per bio */
#define SCRUB_BIOS_PER_SCTX	64	/* 8MB per device in flight */

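/*
 * A quick sanity check of the limits above (a sketch assuming 4K pages;
 * the per-bio and per-device numbers come from the in-line comments):
 *
 *	32 pages/bio  * 4K/page  = 128K per read or write bio
 *	64 bios/sctx  * 128K/bio = 8MB in flight per scrubbed device
 */
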
/*
 * the following value times PAGE_SIZE needs to be large enough to match the
 * largest node/leaf/sector size that shall be supported.
 * Values larger than BTRFS_STRIPE_LEN are not supported.
 */
#define SCRUB_MAX_PAGES_PER_BLOCK	16	/* 64k per node/leaf/sector */

struct scrub_recover {
	atomic_t		refs;
	struct btrfs_bio	*bbio;
	u64			map_length;
};

struct scrub_page {
	struct scrub_block	*sblock;
	struct page		*page;
	struct btrfs_device	*dev;
	struct list_head	list;
	u64			flags;  /* extent flags */
	u64			generation;
	u64			logical;
	u64			physical;
	u64			physical_for_dev_replace;
	atomic_t		refs;
	struct {
		unsigned int	mirror_num:8;
		unsigned int	have_csum:1;
		unsigned int	io_error:1;
	};
	u8			csum[BTRFS_CSUM_SIZE];

	struct scrub_recover	*recover;
};

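/*
 * A note on lifetime, inferred from the refs member and the
 * scrub_page_get()/scrub_page_put() helpers declared further down: a
 * scrub_page is shared between the scrub_block it belongs to and any bio
 * that carries it, and is only freed when the last reference is dropped.
 */
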
struct scrub_bio {
	int			index;
	struct scrub_ctx	*sctx;
	struct btrfs_device	*dev;
	struct bio		*bio;
	int			err;
	u64			logical;
	u64			physical;
#if SCRUB_PAGES_PER_WR_BIO >= SCRUB_PAGES_PER_RD_BIO
	struct scrub_page	*pagev[SCRUB_PAGES_PER_WR_BIO];
#else
	struct scrub_page	*pagev[SCRUB_PAGES_PER_RD_BIO];
#endif
	int			page_count;
	int			next_free;
	struct btrfs_work	work;
};

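/*
 * The #if inside struct scrub_bio sizes pagev[] for the larger of the read
 * and write limits, so the same structure can back both read bios and the
 * dev-replace write bios without a separate type for each direction.
 */
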
struct scrub_block {
	struct scrub_page	*pagev[SCRUB_MAX_PAGES_PER_BLOCK];
	int			page_count;
	atomic_t		outstanding_pages;
	atomic_t		refs; /* free mem on transition to zero */
	struct scrub_ctx	*sctx;
	struct scrub_parity	*sparity;
	struct {
		unsigned int	header_error:1;
		unsigned int	checksum_error:1;
		unsigned int	no_io_error_seen:1;
		unsigned int	generation_error:1; /* also sets header_error */

		/*
		 * The following is for checksummed data that is used
		 * to check parity
		 */
		unsigned int	data_corrected:1;
	};
	struct btrfs_work	work;
};

/* Used for chunks with parity stripes, such as RAID5/6 */
struct scrub_parity {
	struct scrub_ctx	*sctx;

	struct btrfs_device	*scrub_dev;

	u64			logic_start;

	u64			logic_end;

	int			nsectors;

	int			stripe_len;

	atomic_t		refs;

	struct list_head	spages;

	/* Work of parity check and repair */
	struct btrfs_work	work;

	/* Mark the parity blocks which have data */
	unsigned long		*dbitmap;

	/*
	 * Mark the parity blocks which have data, but for which errors
	 * occurred when reading or checking that data
	 */
	unsigned long		*ebitmap;

	unsigned long		bitmap[0];
};

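/*
 * A plausible reading of the tail of struct scrub_parity (the allocation
 * site is outside this excerpt): bitmap[0] is the usual C flexible-array
 * idiom, i.e. a scrub_parity would be allocated with room for two
 * nsectors-sized bitmaps at its tail, with dbitmap and ebitmap pointing
 * into that trailing storage.
 */
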
struct scrub_wr_ctx {
	struct scrub_bio	*wr_curr_bio;
	struct btrfs_device	*tgtdev;
	int			pages_per_wr_bio; /* <= SCRUB_PAGES_PER_WR_BIO */
	atomic_t		flush_all_writes;
	struct mutex		wr_lock;
};

struct scrub_ctx {
	struct scrub_bio	*bios[SCRUB_BIOS_PER_SCTX];
	struct btrfs_root	*dev_root;
	int			first_free;
	int			curr;
	atomic_t		bios_in_flight;
	atomic_t		workers_pending;
	spinlock_t		list_lock;
	wait_queue_head_t	list_wait;
	u16			csum_size;
	struct list_head	csum_list;
	atomic_t		cancel_req;
	int			readonly;
	int			pages_per_rd_bio;
	u32			sectorsize;
	u32			nodesize;

	int			is_dev_replace;
	struct scrub_wr_ctx	wr_ctx;

	/*
	 * statistics
	 */
	struct btrfs_scrub_progress stat;
	spinlock_t		stat_lock;

	/*
	 * Use a ref counter to avoid use-after-free issues. Scrub workers
	 * decrement bios_in_flight and workers_pending and then do a wakeup
	 * on the list_wait wait queue. We must ensure the main scrub task
	 * doesn't free the scrub context before or while the workers are
	 * doing the wakeup() call.
	 */
	atomic_t		refs;
};

struct scrub_fixup_nodatasum {
	struct scrub_ctx	*sctx;
	struct btrfs_device	*dev;
	u64			logical;
	struct btrfs_root	*root;
	struct btrfs_work	work;
	int			mirror_num;
};

struct scrub_nocow_inode {
	u64			inum;
	u64			offset;
	u64			root;
	struct list_head	list;
};

struct scrub_copy_nocow_ctx {
	struct scrub_ctx	*sctx;
	u64			logical;
	u64			len;
	int			mirror_num;
	u64			physical_for_dev_replace;
	struct list_head	inodes;
	struct btrfs_work	work;
};

struct scrub_warning {
	struct btrfs_path	*path;
	u64			extent_item_size;
	const char		*errstr;
	sector_t		sector;
	u64			logical;
	struct btrfs_device	*dev;
};

static void scrub_pending_bio_inc(struct scrub_ctx *sctx);
static void scrub_pending_bio_dec(struct scrub_ctx *sctx);
static void scrub_pending_trans_workers_inc(struct scrub_ctx *sctx);
static void scrub_pending_trans_workers_dec(struct scrub_ctx *sctx);
static int scrub_handle_errored_block(struct scrub_block *sblock_to_check);
static int scrub_setup_recheck_block(struct scrub_block *original_sblock,
				     struct scrub_block *sblocks_for_recheck);
static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
				struct scrub_block *sblock, int is_metadata,
				int have_csum, u8 *csum, u64 generation,
				u16 csum_size, int retry_failed_mirror);
static void scrub_recheck_block_checksum(struct btrfs_fs_info *fs_info,
					 struct scrub_block *sblock,
					 int is_metadata, int have_csum,
					 const u8 *csum, u64 generation,
					 u16 csum_size);
static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
					     struct scrub_block *sblock_good);
static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
					    struct scrub_block *sblock_good,
					    int page_num, int force_write);
static void scrub_write_block_to_dev_replace(struct scrub_block *sblock);
static int scrub_write_page_to_dev_replace(struct scrub_block *sblock,
					   int page_num);
static int scrub_checksum_data(struct scrub_block *sblock);
static int scrub_checksum_tree_block(struct scrub_block *sblock);
static int scrub_checksum_super(struct scrub_block *sblock);
static void scrub_block_get(struct scrub_block *sblock);
static void scrub_block_put(struct scrub_block *sblock);
static void scrub_page_get(struct scrub_page *spage);
static void scrub_page_put(struct scrub_page *spage);
static void scrub_parity_get(struct scrub_parity *sparity);
static void scrub_parity_put(struct scrub_parity *sparity);
static int scrub_add_page_to_rd_bio(struct scrub_ctx *sctx,
				    struct scrub_page *spage);
static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
		       u64 physical, struct btrfs_device *dev, u64 flags,
		       u64 gen, int mirror_num, u8 *csum, int force,
		       u64 physical_for_dev_replace);
static void scrub_bio_end_io(struct bio *bio, int err);
static void scrub_bio_end_io_worker(struct btrfs_work *work);
static void scrub_block_complete(struct scrub_block *sblock);
static void scrub_remap_extent(struct btrfs_fs_info *fs_info,
			       u64 extent_logical, u64 extent_len,
			       u64 *extent_physical,
			       struct btrfs_device **extent_dev,
			       int *extent_mirror_num);
static int scrub_setup_wr_ctx(struct scrub_ctx *sctx,
			      struct scrub_wr_ctx *wr_ctx,
			      struct btrfs_fs_info *fs_info,
			      struct btrfs_device *dev,
			      int is_dev_replace);
static void scrub_free_wr_ctx(struct scrub_wr_ctx *wr_ctx);
static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
				    struct scrub_page *spage);
static void scrub_wr_submit(struct scrub_ctx *sctx);
static void scrub_wr_bio_end_io(struct bio *bio, int err);
static void scrub_wr_bio_end_io_worker(struct btrfs_work *work);
static int write_page_nocow(struct scrub_ctx *sctx,
			    u64 physical_for_dev_replace, struct page *page);
static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root,
				      struct scrub_copy_nocow_ctx *ctx);
static int copy_nocow_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
			    int mirror_num, u64 physical_for_dev_replace);
static void copy_nocow_pages_worker(struct btrfs_work *work);
static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info);
static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info);
static void scrub_put_ctx(struct scrub_ctx *sctx);


static void scrub_pending_bio_inc(struct scrub_ctx *sctx)
{
	atomic_inc(&sctx->refs);
	atomic_inc(&sctx->bios_in_flight);
}

static void scrub_pending_bio_dec(struct scrub_ctx *sctx)
{
	atomic_dec(&sctx->bios_in_flight);
	wake_up(&sctx->list_wait);
	scrub_put_ctx(sctx);
}

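/*
 * Note the pairing above: scrub_pending_bio_inc() takes a context
 * reference before raising bios_in_flight, and scrub_pending_bio_dec()
 * drops it only after the wakeup. This is what keeps the wake_up() from
 * touching a freed scrub_ctx (see the comment at the refs member of
 * struct scrub_ctx).
 */
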
static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
{
	while (atomic_read(&fs_info->scrub_pause_req)) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
			   atomic_read(&fs_info->scrub_pause_req) == 0);
		mutex_lock(&fs_info->scrub_lock);
	}
}

static void scrub_pause_on(struct btrfs_fs_info *fs_info)
{
	atomic_inc(&fs_info->scrubs_paused);
	wake_up(&fs_info->scrub_pause_wait);
}

static void scrub_pause_off(struct btrfs_fs_info *fs_info)
{
	mutex_lock(&fs_info->scrub_lock);
	__scrub_blocked_if_needed(fs_info);
	atomic_dec(&fs_info->scrubs_paused);
	mutex_unlock(&fs_info->scrub_lock);

	wake_up(&fs_info->scrub_pause_wait);
}

static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
{
	scrub_pause_on(fs_info);
	scrub_pause_off(fs_info);
}

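/*
 * In other words: scrub_pause_on() announces this scrub as paused and
 * wakes any waiter, while scrub_pause_off() blocks until no pause request
 * is outstanding before declaring the scrub running again. Calling them
 * back to back, as scrub_blocked_if_needed() does, is how a scrub thread
 * yields at a safe point whenever a pause was requested, e.g. by a
 * transaction commit.
 */
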
/*
 * used for workers that require transaction commits (i.e., for the
 * NOCOW case)
 */
static void scrub_pending_trans_workers_inc(struct scrub_ctx *sctx)
{
	struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;

	atomic_inc(&sctx->refs);
	/*
	 * increment scrubs_running to prevent cancel requests from
	 * completing as long as a worker is running. we must also
	 * increment scrubs_paused to prevent deadlocking on pause
	 * requests used for transaction commits (as the worker uses a
	 * transaction context). it is safe to regard the worker
	 * as paused for all practical matters. effectively, we only
	 * avoid cancellation requests from completing.
	 */
	mutex_lock(&fs_info->scrub_lock);
	atomic_inc(&fs_info->scrubs_running);
	atomic_inc(&fs_info->scrubs_paused);
	mutex_unlock(&fs_info->scrub_lock);

	/*
	 * The check of the scrubs_running == scrubs_paused condition
	 * inside wait_event() is not an atomic operation, which means
	 * we may inc/dec scrubs_running/scrubs_paused at any time.
	 * Wake up scrub_pause_wait as often as we can, so that the
	 * transaction commit is blocked for as short a time as possible.
	 */
	wake_up(&fs_info->scrub_pause_wait);

	atomic_inc(&sctx->workers_pending);
}

/* used for workers that require transaction commits */
static void scrub_pending_trans_workers_dec(struct scrub_ctx *sctx)
{
	struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;

	/*
	 * see scrub_pending_trans_workers_inc() why we're pretending
	 * to be paused in the scrub counters
	 */
	mutex_lock(&fs_info->scrub_lock);
	atomic_dec(&fs_info->scrubs_running);
	atomic_dec(&fs_info->scrubs_paused);
	mutex_unlock(&fs_info->scrub_lock);
	atomic_dec(&sctx->workers_pending);
	wake_up(&fs_info->scrub_pause_wait);
	wake_up(&sctx->list_wait);
	scrub_put_ctx(sctx);
}

static void scrub_free_csums(struct scrub_ctx *sctx)
{
	while (!list_empty(&sctx->csum_list)) {
		struct btrfs_ordered_sum *sum;
		sum = list_first_entry(&sctx->csum_list,
				       struct btrfs_ordered_sum, list);
		list_del(&sum->list);
		kfree(sum);
	}
}

static noinline_for_stack void scrub_free_ctx(struct scrub_ctx *sctx)
{
	int i;

	if (!sctx)
		return;

	scrub_free_wr_ctx(&sctx->wr_ctx);

	/* this can happen when scrub is cancelled */
	if (sctx->curr != -1) {
		struct scrub_bio *sbio = sctx->bios[sctx->curr];

		for (i = 0; i < sbio->page_count; i++) {
			WARN_ON(!sbio->pagev[i]->page);
			scrub_block_put(sbio->pagev[i]->sblock);
		}
		bio_put(sbio->bio);
	}

	for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) {
		struct scrub_bio *sbio = sctx->bios[i];

		if (!sbio)
			break;
		kfree(sbio);
	}

	scrub_free_csums(sctx);
	kfree(sctx);
}

static void scrub_put_ctx(struct scrub_ctx *sctx)
{
	if (atomic_dec_and_test(&sctx->refs))
		scrub_free_ctx(sctx);
}

static noinline_for_stack
struct scrub_ctx *scrub_setup_ctx(struct btrfs_device *dev, int is_dev_replace)
{
	struct scrub_ctx *sctx;
	int i;
	struct btrfs_fs_info *fs_info = dev->dev_root->fs_info;
	int pages_per_rd_bio;
	int ret;

	/*
	 * the setting of pages_per_rd_bio is correct for scrub but might
	 * be wrong for the dev_replace code where we might read from
	 * different devices in the initial huge bios. However, that
	 * code is able to correctly handle the case when adding a page
	 * to a bio fails.
	 */
	if (dev->bdev)
		pages_per_rd_bio = min_t(int, SCRUB_PAGES_PER_RD_BIO,
					 bio_get_nr_vecs(dev->bdev));
	else
		pages_per_rd_bio = SCRUB_PAGES_PER_RD_BIO;
	sctx = kzalloc(sizeof(*sctx), GFP_NOFS);
	if (!sctx)
		goto nomem;
	atomic_set(&sctx->refs, 1);
	sctx->is_dev_replace = is_dev_replace;
	sctx->pages_per_rd_bio = pages_per_rd_bio;
	sctx->curr = -1;
	sctx->dev_root = dev->dev_root;
	for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) {
		struct scrub_bio *sbio;

		sbio = kzalloc(sizeof(*sbio), GFP_NOFS);
		if (!sbio)
			goto nomem;
		sctx->bios[i] = sbio;

		sbio->index = i;
		sbio->sctx = sctx;
		sbio->page_count = 0;
		btrfs_init_work(&sbio->work, btrfs_scrub_helper,
				scrub_bio_end_io_worker, NULL, NULL);

		if (i != SCRUB_BIOS_PER_SCTX - 1)
			sctx->bios[i]->next_free = i + 1;
		else
			sctx->bios[i]->next_free = -1;
	}
	sctx->first_free = 0;
	sctx->nodesize = dev->dev_root->nodesize;
	sctx->sectorsize = dev->dev_root->sectorsize;
	atomic_set(&sctx->bios_in_flight, 0);
	atomic_set(&sctx->workers_pending, 0);
	atomic_set(&sctx->cancel_req, 0);
	sctx->csum_size = btrfs_super_csum_size(fs_info->super_copy);
	INIT_LIST_HEAD(&sctx->csum_list);

	spin_lock_init(&sctx->list_lock);
	spin_lock_init(&sctx->stat_lock);
	init_waitqueue_head(&sctx->list_wait);

	ret = scrub_setup_wr_ctx(sctx, &sctx->wr_ctx, fs_info,
				 fs_info->dev_replace.tgtdev, is_dev_replace);
	if (ret) {
		scrub_free_ctx(sctx);
		return ERR_PTR(ret);
	}
	return sctx;

nomem:
	scrub_free_ctx(sctx);
	return ERR_PTR(-ENOMEM);
}

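/*
 * Rough usage sketch for the context lifecycle (hypothetical caller; in
 * mainline the real caller is btrfs_scrub_dev(), which is outside this
 * excerpt):
 *
 *	struct scrub_ctx *sctx = scrub_setup_ctx(dev, is_dev_replace);
 *
 *	if (IS_ERR(sctx))
 *		return PTR_ERR(sctx);
 *	... queue scrub work, wait on list_wait for bios_in_flight
 *	    and workers_pending to drain ...
 *	scrub_put_ctx(sctx);	<-- drops the initial reference
 */
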
static int scrub_print_warning_inode(u64 inum, u64 offset, u64 root,
				     void *warn_ctx)
{
	u64 isize;
	u32 nlink;
	int ret;
	int i;
	struct extent_buffer *eb;
	struct btrfs_inode_item *inode_item;
	struct scrub_warning *swarn = warn_ctx;
	struct btrfs_fs_info *fs_info = swarn->dev->dev_root->fs_info;
	struct inode_fs_paths *ipath = NULL;
	struct btrfs_root *local_root;
	struct btrfs_key root_key;
	struct btrfs_key key;

	root_key.objectid = root;
	root_key.type = BTRFS_ROOT_ITEM_KEY;
	root_key.offset = (u64)-1;
	local_root = btrfs_read_fs_root_no_name(fs_info, &root_key);
	if (IS_ERR(local_root)) {
		ret = PTR_ERR(local_root);
		goto err;
	}

	/*
	 * this makes the path point to (inum INODE_ITEM ioff)
	 */
	key.objectid = inum;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;

	ret = btrfs_search_slot(NULL, local_root, &key, swarn->path, 0, 0);
	if (ret) {
		btrfs_release_path(swarn->path);
		goto err;
	}

	eb = swarn->path->nodes[0];
	inode_item = btrfs_item_ptr(eb, swarn->path->slots[0],
				    struct btrfs_inode_item);
	isize = btrfs_inode_size(eb, inode_item);
	nlink = btrfs_inode_nlink(eb, inode_item);
	btrfs_release_path(swarn->path);

	ipath = init_ipath(4096, local_root, swarn->path);
	if (IS_ERR(ipath)) {
		ret = PTR_ERR(ipath);
		ipath = NULL;
		goto err;
	}
	ret = paths_from_inode(inum, ipath);

	if (ret < 0)
		goto err;

	/*
	 * we deliberately ignore the fact that ipath might have been too
	 * small to hold all of the paths here
	 */
	for (i = 0; i < ipath->fspath->elem_cnt; ++i)
		printk_in_rcu(KERN_WARNING "BTRFS: %s at logical %llu on dev "
			"%s, sector %llu, root %llu, inode %llu, offset %llu, "
			"length %llu, links %u (path: %s)\n", swarn->errstr,
			swarn->logical, rcu_str_deref(swarn->dev->name),
			(unsigned long long)swarn->sector, root, inum, offset,
			min(isize - offset, (u64)PAGE_SIZE), nlink,
			(char *)(unsigned long)ipath->fspath->val[i]);

	free_ipath(ipath);
	return 0;

err:
	printk_in_rcu(KERN_WARNING "BTRFS: %s at logical %llu on dev "
		"%s, sector %llu, root %llu, inode %llu, offset %llu: path "
		"resolving failed with ret=%d\n", swarn->errstr,
		swarn->logical, rcu_str_deref(swarn->dev->name),
		(unsigned long long)swarn->sector, root, inum, offset, ret);

	free_ipath(ipath);
	return 0;
}

static void scrub_print_warning(const char *errstr, struct scrub_block *sblock)
{
	struct btrfs_device *dev;
	struct btrfs_fs_info *fs_info;
	struct btrfs_path *path;
	struct btrfs_key found_key;
	struct extent_buffer *eb;
	struct btrfs_extent_item *ei;
	struct scrub_warning swarn;
	unsigned long ptr = 0;
	u64 extent_item_pos;
	u64 flags = 0;
	u64 ref_root;
	u32 item_size;
	u8 ref_level;
	int ret;

	WARN_ON(sblock->page_count < 1);
	dev = sblock->pagev[0]->dev;
	fs_info = sblock->sctx->dev_root->fs_info;

	path = btrfs_alloc_path();
	if (!path)
		return;

	swarn.sector = (sblock->pagev[0]->physical) >> 9;
	swarn.logical = sblock->pagev[0]->logical;
	swarn.errstr = errstr;
	swarn.dev = NULL;

	ret = extent_from_logical(fs_info, swarn.logical, path, &found_key,
				  &flags);
	if (ret < 0)
		goto out;

	extent_item_pos = swarn.logical - found_key.objectid;
	swarn.extent_item_size = found_key.offset;

	eb = path->nodes[0];
	ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
	item_size = btrfs_item_size_nr(eb, path->slots[0]);

	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		do {
			ret = tree_backref_for_extent(&ptr, eb, &found_key, ei,
						      item_size, &ref_root,
						      &ref_level);
			printk_in_rcu(KERN_WARNING
				"BTRFS: %s at logical %llu on dev %s, "
				"sector %llu: metadata %s (level %d) in tree "
				"%llu\n", errstr, swarn.logical,
				rcu_str_deref(dev->name),
				(unsigned long long)swarn.sector,
				ref_level ? "node" : "leaf",
				ret < 0 ? -1 : ref_level,
				ret < 0 ? -1 : ref_root);
		} while (ret != 1);
		btrfs_release_path(path);
	} else {
		btrfs_release_path(path);
		swarn.path = path;
		swarn.dev = dev;
		iterate_extent_inodes(fs_info, found_key.objectid,
				      extent_item_pos, 1,
				      scrub_print_warning_inode, &swarn);
	}

out:
	btrfs_free_path(path);
}

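/*
 * Summary of the flow above: the corrupted sector's logical address is
 * mapped back to the extent item that covers it. For metadata, the tree
 * backrefs are walked and each referencing tree is reported; for data
 * extents, iterate_extent_inodes() resolves every (root, inode, offset)
 * that references the extent, so that scrub_print_warning_inode() can
 * print the affected file paths.
 */
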
static int scrub_fixup_readpage(u64 inum, u64 offset, u64 root, void *fixup_ctx)
{
	struct page *page = NULL;
	unsigned long index;
	struct scrub_fixup_nodatasum *fixup = fixup_ctx;
	int ret;
	int corrected = 0;
	struct btrfs_key key;
	struct inode *inode = NULL;
	struct btrfs_fs_info *fs_info;
	u64 end = offset + PAGE_SIZE - 1;
	struct btrfs_root *local_root;
	int srcu_index;

	key.objectid = root;
	key.type = BTRFS_ROOT_ITEM_KEY;
	key.offset = (u64)-1;

	fs_info = fixup->root->fs_info;
	srcu_index = srcu_read_lock(&fs_info->subvol_srcu);

	local_root = btrfs_read_fs_root_no_name(fs_info, &key);
	if (IS_ERR(local_root)) {
		srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
		return PTR_ERR(local_root);
	}

	key.type = BTRFS_INODE_ITEM_KEY;
	key.objectid = inum;
	key.offset = 0;
	inode = btrfs_iget(fs_info->sb, &key, local_root, NULL);
	srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	index = offset >> PAGE_CACHE_SHIFT;

	page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
	if (!page) {
		ret = -ENOMEM;
		goto out;
	}

	if (PageUptodate(page)) {
		if (PageDirty(page)) {
			/*
			 * we need to write the data to the defect sector. the
			 * data that was in that sector is not in memory,
			 * because the page was modified. we must not write the
			 * modified page to that sector.
			 *
			 * TODO: what could be done here: wait for the delalloc
			 *       runner to write out that page (might involve
			 *       COW) and see whether the sector is still
			 *       referenced afterwards.
			 *
			 * For the meantime, we'll treat this error as
			 * uncorrectable, although there is a chance that a
			 * later scrub will find the bad sector again and that
			 * there's no dirty page in memory then.
			 */
			ret = -EIO;
			goto out;
		}
		ret = repair_io_failure(inode, offset, PAGE_SIZE,
					fixup->logical, page,
					offset - page_offset(page),
					fixup->mirror_num);
		unlock_page(page);
		corrected = !ret;
	} else {
		/*
		 * we need to get good data first. the general readpage path
		 * will call repair_io_failure for us, we just have to make
		 * sure we read the bad mirror.
		 */
		ret = set_extent_bits(&BTRFS_I(inode)->io_tree, offset, end,
				      EXTENT_DAMAGED, GFP_NOFS);
		if (ret) {
			/* set_extent_bits should give proper error */
			WARN_ON(ret > 0);
			if (ret > 0)
				ret = -EFAULT;
			goto out;
		}

		ret = extent_read_full_page(&BTRFS_I(inode)->io_tree, page,
					    btrfs_get_extent,
					    fixup->mirror_num);
		wait_on_page_locked(page);

		corrected = !test_range_bit(&BTRFS_I(inode)->io_tree, offset,
					    end, EXTENT_DAMAGED, 0, NULL);
		if (!corrected)
			clear_extent_bits(&BTRFS_I(inode)->io_tree, offset, end,
					  EXTENT_DAMAGED, GFP_NOFS);
	}

out:
	if (page)
		put_page(page);

	iput(inode);

	if (ret < 0)
		return ret;

	if (ret == 0 && corrected) {
		/*
		 * we only need to call readpage for one of the inodes belonging
		 * to this extent. so make iterate_extent_inodes stop
		 */
		return 1;
	}

	return -EIO;
}

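/*
 * Return convention above, matching what iterate_inodes_from_logical()
 * expects from its callback: a negative value is a hard error, 1 means
 * "corrected, stop iterating over further inodes", and -EIO marks a page
 * that could not be fixed up.
 */
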
static void scrub_fixup_nodatasum(struct btrfs_work *work)
{
	int ret;
	struct scrub_fixup_nodatasum *fixup;
	struct scrub_ctx *sctx;
	struct btrfs_trans_handle *trans = NULL;
	struct btrfs_path *path;
	int uncorrectable = 0;

	fixup = container_of(work, struct scrub_fixup_nodatasum, work);
	sctx = fixup->sctx;

	path = btrfs_alloc_path();
	if (!path) {
		spin_lock(&sctx->stat_lock);
		++sctx->stat.malloc_errors;
		spin_unlock(&sctx->stat_lock);
		uncorrectable = 1;
		goto out;
	}

	trans = btrfs_join_transaction(fixup->root);
	if (IS_ERR(trans)) {
		uncorrectable = 1;
		goto out;
	}

	/*
	 * the idea is to trigger a regular read through the standard path. we
	 * read a page from the (failed) logical address by specifying the
	 * corresponding copynum of the failed sector. thus, that readpage is
	 * expected to fail.
	 * that is the point where on-the-fly error correction will kick in
	 * (once it's finished) and rewrite the failed sector if a good copy
	 * can be found.
	 */
	ret = iterate_inodes_from_logical(fixup->logical, fixup->root->fs_info,
					  path, scrub_fixup_readpage,
					  fixup);
	if (ret < 0) {
		uncorrectable = 1;
		goto out;
	}
	WARN_ON(ret != 1);

	spin_lock(&sctx->stat_lock);
	++sctx->stat.corrected_errors;
	spin_unlock(&sctx->stat_lock);

out:
	if (trans && !IS_ERR(trans))
		btrfs_end_transaction(trans, fixup->root);
	if (uncorrectable) {
		spin_lock(&sctx->stat_lock);
		++sctx->stat.uncorrectable_errors;
		spin_unlock(&sctx->stat_lock);
		btrfs_dev_replace_stats_inc(
			&sctx->dev_root->fs_info->dev_replace.
			num_uncorrectable_read_errors);
		printk_ratelimited_in_rcu(KERN_ERR "BTRFS: "
		    "unable to fixup (nodatasum) error at logical %llu on dev %s\n",
			fixup->logical, rcu_str_deref(fixup->dev->name));
	}

	btrfs_free_path(path);
	kfree(fixup);

	scrub_pending_trans_workers_dec(sctx);
}

static inline void scrub_get_recover(struct scrub_recover *recover)
{
	atomic_inc(&recover->refs);
}

static inline void scrub_put_recover(struct scrub_recover *recover)
{
	if (atomic_dec_and_test(&recover->refs)) {
		btrfs_put_bbio(recover->bbio);
		kfree(recover);
	}
}

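/*
 * scrub_recover follows the same get/put pattern as the other scrub
 * objects. The bbio it wraps is obtained elsewhere (outside this excerpt)
 * from btrfs's chunk mapping code and must be released through
 * btrfs_put_bbio() rather than a plain kfree(), which is why the final
 * put above does both steps.
 */
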
/*
 * scrub_handle_errored_block gets called when either verification of the
 * pages failed or the bio failed to read, e.g. with EIO. In the latter
 * case, this function handles all pages in the bio, even though only one
 * may be bad.
 * The goal of this function is to repair the errored block by using the
 * contents of one of the mirrors.
 */
static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
{
	struct scrub_ctx *sctx = sblock_to_check->sctx;
	struct btrfs_device *dev;
	struct btrfs_fs_info *fs_info;
	u64 length;
	u64 logical;
	u64 generation;
	unsigned int failed_mirror_index;
	unsigned int is_metadata;
	unsigned int have_csum;
	u8 *csum;
	struct scrub_block *sblocks_for_recheck; /* holds one for each mirror */
	struct scrub_block *sblock_bad;
	int ret;
	int mirror_index;
	int page_num;
	int success;
	static DEFINE_RATELIMIT_STATE(_rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);

	BUG_ON(sblock_to_check->page_count < 1);
	fs_info = sctx->dev_root->fs_info;
	if (sblock_to_check->pagev[0]->flags & BTRFS_EXTENT_FLAG_SUPER) {
		/*
		 * if we find an error in a super block, we just report it.
		 * They will get written with the next transaction commit
		 * anyway
		 */
		spin_lock(&sctx->stat_lock);
		++sctx->stat.super_errors;
		spin_unlock(&sctx->stat_lock);
		return 0;
	}
	length = sblock_to_check->page_count * PAGE_SIZE;
	logical = sblock_to_check->pagev[0]->logical;
	generation = sblock_to_check->pagev[0]->generation;
	BUG_ON(sblock_to_check->pagev[0]->mirror_num < 1);
	failed_mirror_index = sblock_to_check->pagev[0]->mirror_num - 1;
	is_metadata = !(sblock_to_check->pagev[0]->flags &
			BTRFS_EXTENT_FLAG_DATA);
	have_csum = sblock_to_check->pagev[0]->have_csum;
	csum = sblock_to_check->pagev[0]->csum;
	dev = sblock_to_check->pagev[0]->dev;

	if (sctx->is_dev_replace && !is_metadata && !have_csum) {
		sblocks_for_recheck = NULL;
		goto nodatasum_case;
	}

	/*
	 * read all mirrors one after the other. This includes re-reading
	 * the extent or metadata block that failed (that failure was the
	 * reason this fixup code is called), this time page by page, in
	 * order to know which pages caused I/O errors and which ones are
	 * good (for all mirrors).
	 * It is the goal to handle the situation when more than one
	 * mirror contains I/O errors, but the errors do not
	 * overlap, i.e. the data can be repaired by selecting the
	 * pages from those mirrors without I/O error on the
	 * particular pages. One example (with blocks >= 2 * PAGE_SIZE)
	 * would be that mirror #1 has an I/O error on the first page,
	 * the second page is good, and mirror #2 has an I/O error on
	 * the second page, but the first page is good.
	 * Then the first page of the first mirror can be repaired by
	 * taking the first page of the second mirror, and the
	 * second page of the second mirror can be repaired by
	 * copying the contents of the 2nd page of the 1st mirror.
	 * One more note: if the pages of one mirror contain I/O
	 * errors, the checksum cannot be verified. In order to get
	 * the best data for repairing, the first attempt is to find
	 * a mirror without I/O errors and with a validated checksum.
	 * Only if this is not possible are the pages picked from
	 * mirrors with I/O errors, without considering the checksum.
	 * If the latter is the case, at the end the checksum of the
	 * repaired area is verified in order to correctly maintain
	 * the statistics.
	 */

	sblocks_for_recheck = kcalloc(BTRFS_MAX_MIRRORS,
				      sizeof(*sblocks_for_recheck), GFP_NOFS);
	if (!sblocks_for_recheck) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.malloc_errors++;
		sctx->stat.read_errors++;
		sctx->stat.uncorrectable_errors++;
		spin_unlock(&sctx->stat_lock);
		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
		goto out;
	}

	/* setup the context, map the logical blocks and alloc the pages */
	ret = scrub_setup_recheck_block(sblock_to_check, sblocks_for_recheck);
	if (ret) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.read_errors++;
		sctx->stat.uncorrectable_errors++;
		spin_unlock(&sctx->stat_lock);
		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
		goto out;
	}
	BUG_ON(failed_mirror_index >= BTRFS_MAX_MIRRORS);
	sblock_bad = sblocks_for_recheck + failed_mirror_index;

	/* build and submit the bios for the failed mirror, check checksums */
	scrub_recheck_block(fs_info, sblock_bad, is_metadata, have_csum,
			    csum, generation, sctx->csum_size, 1);

	if (!sblock_bad->header_error && !sblock_bad->checksum_error &&
	    sblock_bad->no_io_error_seen) {
		/*
		 * the error disappeared after reading page by page, or
		 * the area was part of a huge bio and other parts of the
		 * bio caused I/O errors, or the block layer merged several
		 * read requests into one and the error is caused by a
		 * different bio (usually one of the two latter cases is
		 * the cause)
		 */
		spin_lock(&sctx->stat_lock);
		sctx->stat.unverified_errors++;
		sblock_to_check->data_corrected = 1;
		spin_unlock(&sctx->stat_lock);

		if (sctx->is_dev_replace)
			scrub_write_block_to_dev_replace(sblock_bad);
		goto out;
	}

	if (!sblock_bad->no_io_error_seen) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.read_errors++;
		spin_unlock(&sctx->stat_lock);
		if (__ratelimit(&_rs))
			scrub_print_warning("i/o error", sblock_to_check);
		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
	} else if (sblock_bad->checksum_error) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.csum_errors++;
		spin_unlock(&sctx->stat_lock);
		if (__ratelimit(&_rs))
			scrub_print_warning("checksum error", sblock_to_check);
		btrfs_dev_stat_inc_and_print(dev,
					     BTRFS_DEV_STAT_CORRUPTION_ERRS);
	} else if (sblock_bad->header_error) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.verify_errors++;
		spin_unlock(&sctx->stat_lock);
		if (__ratelimit(&_rs))
			scrub_print_warning("checksum/header error",
					    sblock_to_check);
		if (sblock_bad->generation_error)
			btrfs_dev_stat_inc_and_print(dev,
				BTRFS_DEV_STAT_GENERATION_ERRS);
		else
			btrfs_dev_stat_inc_and_print(dev,
				BTRFS_DEV_STAT_CORRUPTION_ERRS);
	}

	if (sctx->readonly) {
		ASSERT(!sctx->is_dev_replace);
		goto out;
	}

	if (!is_metadata && !have_csum) {
		struct scrub_fixup_nodatasum *fixup_nodatasum;

		WARN_ON(sctx->is_dev_replace);

nodatasum_case:

		/*
		 * !is_metadata and !have_csum, this means that the data
		 * might not be COW'ed, that it might be modified
		 * concurrently. The general strategy to work on the
		 * commit root does not help in the case when COW is not
		 * used.
		 */
		fixup_nodatasum = kzalloc(sizeof(*fixup_nodatasum), GFP_NOFS);
		if (!fixup_nodatasum)
			goto did_not_correct_error;
		fixup_nodatasum->sctx = sctx;
		fixup_nodatasum->dev = dev;
		fixup_nodatasum->logical = logical;
		fixup_nodatasum->root = fs_info->extent_root;
		fixup_nodatasum->mirror_num = failed_mirror_index + 1;
		scrub_pending_trans_workers_inc(sctx);
		btrfs_init_work(&fixup_nodatasum->work, btrfs_scrub_helper,
				scrub_fixup_nodatasum, NULL, NULL);
		btrfs_queue_work(fs_info->scrub_workers,
				 &fixup_nodatasum->work);
		goto out;
	}

	/*
	 * now build and submit the bios for the other mirrors, check
	 * checksums.
	 * First try to pick the mirror which is completely without I/O
	 * errors and also does not have a checksum error.
	 * If one is found, and if a checksum is present, the full block
	 * that is known to contain an error is rewritten. Afterwards
	 * the block is known to be corrected.
	 * If a mirror is found which is completely correct, and no
	 * checksum is present, only those pages are rewritten that had
	 * an I/O error in the block to be repaired, since it cannot be
	 * determined which copy of the other pages is better (and it
	 * could happen otherwise that a correct page would be
	 * overwritten by a bad one).
	 */
	for (mirror_index = 0;
	     mirror_index < BTRFS_MAX_MIRRORS &&
	     sblocks_for_recheck[mirror_index].page_count > 0;
	     mirror_index++) {
		struct scrub_block *sblock_other;

		if (mirror_index == failed_mirror_index)
			continue;
		sblock_other = sblocks_for_recheck + mirror_index;

		/* build and submit the bios, check checksums */
		scrub_recheck_block(fs_info, sblock_other, is_metadata,
				    have_csum, csum, generation,
				    sctx->csum_size, 0);

		if (!sblock_other->header_error &&
		    !sblock_other->checksum_error &&
		    sblock_other->no_io_error_seen) {
			if (sctx->is_dev_replace) {
				scrub_write_block_to_dev_replace(sblock_other);
				goto corrected_error;
			} else {
				ret = scrub_repair_block_from_good_copy(
						sblock_bad, sblock_other);
				if (!ret)
					goto corrected_error;
			}
		}
	}

	if (sblock_bad->no_io_error_seen && !sctx->is_dev_replace)
		goto did_not_correct_error;

	/*
	 * In case of I/O errors in the area that is supposed to be
	 * repaired, continue by picking good copies of those pages.
	 * Select the good pages from mirrors to rewrite bad pages from
	 * the area to fix. Afterwards verify the checksum of the block
	 * that is supposed to be repaired. This verification step is
	 * only done for the purpose of statistic counting and for the
	 * final scrub report, whether errors remain.
	 * A perfect algorithm could make use of the checksum and try
	 * all possible combinations of pages from the different mirrors
	 * until the checksum verification succeeds. For example, when
	 * the 2nd page of mirror #1 faces I/O errors, and the 2nd page
	 * of mirror #2 is readable but the final checksum test fails,
	 * then the 2nd page of mirror #3 could be tried, to see whether
	 * the final checksum now succeeds. But this would be a rare
	 * exception and is therefore not implemented. At least it is
	 * avoided that the good copy is overwritten.
	 * A more useful improvement would be to pick the sectors
	 * without I/O error based on sector sizes (512 bytes on legacy
	 * disks) instead of on PAGE_SIZE. Then maybe 512 bytes of one
	 * mirror could be repaired by taking 512 bytes of a different
	 * mirror, even if other 512 byte sectors in the same PAGE_SIZE
	 * area are unreadable.
	 */
	success = 1;
	for (page_num = 0; page_num < sblock_bad->page_count;
	     page_num++) {
		struct scrub_page *page_bad = sblock_bad->pagev[page_num];
		struct scrub_block *sblock_other = NULL;

		/* skip pages without I/O errors in plain scrub mode */
		if (!page_bad->io_error && !sctx->is_dev_replace)
			continue;

		/* try to find a page without I/O errors among the mirrors */
		if (page_bad->io_error) {
			for (mirror_index = 0;
			     mirror_index < BTRFS_MAX_MIRRORS &&
			     sblocks_for_recheck[mirror_index].page_count > 0;
			     mirror_index++) {
				if (!sblocks_for_recheck[mirror_index].
				    pagev[page_num]->io_error) {
					sblock_other = sblocks_for_recheck +
						       mirror_index;
					break;
				}
			}
			if (!sblock_other)
				success = 0;
		}

		if (sctx->is_dev_replace) {
			/*
			 * did not find a mirror to fetch the page
			 * from. scrub_write_page_to_dev_replace()
			 * handles this case (page->io_error) by
			 * filling the block with zeros before
			 * submitting the write request
			 */
			if (!sblock_other)
				sblock_other = sblock_bad;

			if (scrub_write_page_to_dev_replace(sblock_other,
							    page_num) != 0) {
				btrfs_dev_replace_stats_inc(
					&sctx->dev_root->
					fs_info->dev_replace.
					num_write_errors);
				success = 0;
			}
		} else if (sblock_other) {
			ret = scrub_repair_page_from_good_copy(sblock_bad,
							       sblock_other,
							       page_num, 0);
			if (0 == ret)
				page_bad->io_error = 0;
			else
				success = 0;
		}
	}

Zhao Leib968fed2015-01-20 15:11:41 +08001220 if (success && !sctx->is_dev_replace) {
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001221 if (is_metadata || have_csum) {
1222 /*
1223 * need to verify the checksum now that all
1224 * sectors on disk are repaired (the write
1225 * request for data to be repaired is on its way).
1226 * Just be lazy and use scrub_recheck_block()
1227 * which re-reads the data before the checksum
1228 * is verified, but most likely the data comes out
1229 * of the page cache.
1230 */
Stefan Behrens34f5c8e2012-11-02 16:16:26 +01001231 scrub_recheck_block(fs_info, sblock_bad,
1232 is_metadata, have_csum, csum,
Miao Xieaf8e2d12014-10-23 14:42:50 +08001233 generation, sctx->csum_size, 1);
Stefan Behrens34f5c8e2012-11-02 16:16:26 +01001234 if (!sblock_bad->header_error &&
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001235 !sblock_bad->checksum_error &&
1236 sblock_bad->no_io_error_seen)
1237 goto corrected_error;
1238 else
1239 goto did_not_correct_error;
1240 } else {
1241corrected_error:
Stefan Behrensd9d181c2012-11-02 09:58:09 +01001242 spin_lock(&sctx->stat_lock);
1243 sctx->stat.corrected_errors++;
Miao Xie5a6ac9e2014-11-06 17:20:58 +08001244 sblock_to_check->data_corrected = 1;
Stefan Behrensd9d181c2012-11-02 09:58:09 +01001245 spin_unlock(&sctx->stat_lock);
Josef Bacik606686e2012-06-04 14:03:51 -04001246 printk_ratelimited_in_rcu(KERN_ERR
Frank Holtonefe120a2013-12-20 11:37:06 -05001247 "BTRFS: fixed up error at logical %llu on dev %s\n",
Geert Uytterhoevenc1c9ff72013-08-20 13:20:07 +02001248 logical, rcu_str_deref(dev->name));
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001249 }
1250 } else {
1251did_not_correct_error:
Stefan Behrensd9d181c2012-11-02 09:58:09 +01001252 spin_lock(&sctx->stat_lock);
1253 sctx->stat.uncorrectable_errors++;
1254 spin_unlock(&sctx->stat_lock);
Josef Bacik606686e2012-06-04 14:03:51 -04001255 printk_ratelimited_in_rcu(KERN_ERR
Frank Holtonefe120a2013-12-20 11:37:06 -05001256 "BTRFS: unable to fixup (regular) error at logical %llu on dev %s\n",
Geert Uytterhoevenc1c9ff72013-08-20 13:20:07 +02001257 logical, rcu_str_deref(dev->name));
Arne Jansena2de7332011-03-08 14:14:00 +01001258 }
1259
1260out:
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001261 if (sblocks_for_recheck) {
1262 for (mirror_index = 0; mirror_index < BTRFS_MAX_MIRRORS;
1263 mirror_index++) {
1264 struct scrub_block *sblock = sblocks_for_recheck +
1265 mirror_index;
Miao Xieaf8e2d12014-10-23 14:42:50 +08001266 struct scrub_recover *recover;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001267 int page_index;
1268
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001269 for (page_index = 0; page_index < sblock->page_count;
1270 page_index++) {
1271 sblock->pagev[page_index]->sblock = NULL;
Miao Xieaf8e2d12014-10-23 14:42:50 +08001272 recover = sblock->pagev[page_index]->recover;
1273 if (recover) {
1274 scrub_put_recover(recover);
1275 sblock->pagev[page_index]->recover =
1276 NULL;
1277 }
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001278 scrub_page_put(sblock->pagev[page_index]);
1279 }
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001280 }
1281 kfree(sblocks_for_recheck);
1282 }
1283
1284 return 0;
Arne Jansena2de7332011-03-08 14:14:00 +01001285}
1286
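/*
 * Worked example for the page-wise repair above (illustrative, sizes
 * assumed): take a 16k tree block made of four 4k pages where page 2 of
 * mirror #1 hits an I/O error while page 2 of mirror #2 reads cleanly.
 * The repair loop rewrites only page 2 of the bad mirror from mirror #2,
 * then scrub_recheck_block() re-reads and re-verifies the whole block to
 * decide between the corrected_error and did_not_correct_error paths.
 */
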
Zhao Lei8e5cfb52015-01-20 15:11:33 +08001287static inline int scrub_nr_raid_mirrors(struct btrfs_bio *bbio)
Miao Xieaf8e2d12014-10-23 14:42:50 +08001288{
Zhao Lei10f11902015-01-20 15:11:43 +08001289 if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID5)
1290 return 2;
1291 else if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID6)
1292 return 3;
1293 else
Miao Xieaf8e2d12014-10-23 14:42:50 +08001294 return (int)bbio->num_stripes;
Miao Xieaf8e2d12014-10-23 14:42:50 +08001295}
1296
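/*
 * The constants above reflect how many independent ways exist to obtain
 * the data: on RAID5 a sector can be read directly or reconstructed from
 * the remaining devices (2); RAID6 adds a second reconstruction path via
 * the Q stripe (3). For all other profiles, each stripe returned by the
 * mapping code is one mirror.
 */
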
Zhao Lei10f11902015-01-20 15:11:43 +08001297static inline void scrub_stripe_index_and_offset(u64 logical, u64 map_type,
1298 u64 *raid_map,
Miao Xieaf8e2d12014-10-23 14:42:50 +08001299 u64 mapped_length,
1300 int nstripes, int mirror,
1301 int *stripe_index,
1302 u64 *stripe_offset)
1303{
1304 int i;
1305
Zhao Leiffe2d202015-01-20 15:11:44 +08001306 if (map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
Miao Xieaf8e2d12014-10-23 14:42:50 +08001307 /* RAID5/6 */
1308 for (i = 0; i < nstripes; i++) {
1309 if (raid_map[i] == RAID6_Q_STRIPE ||
1310 raid_map[i] == RAID5_P_STRIPE)
1311 continue;
1312
1313 if (logical >= raid_map[i] &&
1314 logical < raid_map[i] + mapped_length)
1315 break;
1316 }
1317
1318 *stripe_index = i;
1319 *stripe_offset = logical - raid_map[i];
1320 } else {
1321 /* The other RAID type */
1322 *stripe_index = mirror;
1323 *stripe_offset = 0;
1324 }
1325}
1326
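/*
 * For RAID5/6 the helper above walks raid_map[], skipping the P and Q
 * placeholder entries, until it finds the data stripe whose logical
 * range contains 'logical'; the offset into that stripe is then simply
 * logical - raid_map[i]. For the mirrored profiles the caller's mirror
 * index is used directly as the stripe index, with offset zero.
 */
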
Zhao Leibe50a8d2015-01-20 15:11:42 +08001327static int scrub_setup_recheck_block(struct scrub_block *original_sblock,
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001328 struct scrub_block *sblocks_for_recheck)
Arne Jansena2de7332011-03-08 14:14:00 +01001329{
Zhao Leibe50a8d2015-01-20 15:11:42 +08001330 struct scrub_ctx *sctx = original_sblock->sctx;
1331 struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
1332 u64 length = original_sblock->page_count * PAGE_SIZE;
1333 u64 logical = original_sblock->pagev[0]->logical;
Miao Xieaf8e2d12014-10-23 14:42:50 +08001334 struct scrub_recover *recover;
1335 struct btrfs_bio *bbio;
Miao Xieaf8e2d12014-10-23 14:42:50 +08001336 u64 sublen;
1337 u64 mapped_length;
1338 u64 stripe_offset;
1339 int stripe_index;
Zhao Leibe50a8d2015-01-20 15:11:42 +08001340 int page_index = 0;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001341 int mirror_index;
Miao Xieaf8e2d12014-10-23 14:42:50 +08001342 int nmirrors;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001343 int ret;
1344
1345 /*
Zhao Lei57019342015-01-20 15:11:45 +08001346 * note: the two members refs and outstanding_pages
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001347 * are not used (and not set) in the blocks that are used for
1348 * the recheck procedure
1349 */
1350
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001351 while (length > 0) {
Miao Xieaf8e2d12014-10-23 14:42:50 +08001352 sublen = min_t(u64, length, PAGE_SIZE);
1353 mapped_length = sublen;
1354 bbio = NULL;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001355
1356 /*
1357 * with a length of PAGE_SIZE, each returned stripe
1358 * represents one mirror
1359 */
Miao Xieaf8e2d12014-10-23 14:42:50 +08001360 ret = btrfs_map_sblock(fs_info, REQ_GET_READ_MIRRORS, logical,
Zhao Lei8e5cfb52015-01-20 15:11:33 +08001361 &mapped_length, &bbio, 0, 1);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001362 if (ret || !bbio || mapped_length < sublen) {
Zhao Lei6e9606d2015-01-20 15:11:34 +08001363 btrfs_put_bbio(bbio);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001364 return -EIO;
1365 }
1366
Miao Xieaf8e2d12014-10-23 14:42:50 +08001367 recover = kzalloc(sizeof(struct scrub_recover), GFP_NOFS);
1368 if (!recover) {
Zhao Lei6e9606d2015-01-20 15:11:34 +08001369 btrfs_put_bbio(bbio);
Miao Xieaf8e2d12014-10-23 14:42:50 +08001370 return -ENOMEM;
1371 }
1372
1373 atomic_set(&recover->refs, 1);
1374 recover->bbio = bbio;
Miao Xieaf8e2d12014-10-23 14:42:50 +08001375 recover->map_length = mapped_length;
1376
Stefan Behrensff023aa2012-11-06 11:43:11 +01001377 BUG_ON(page_index >= SCRUB_PAGES_PER_RD_BIO);
Miao Xieaf8e2d12014-10-23 14:42:50 +08001378
Zhao Leibe50a8d2015-01-20 15:11:42 +08001379 nmirrors = min(scrub_nr_raid_mirrors(bbio), BTRFS_MAX_MIRRORS);
Zhao Lei10f11902015-01-20 15:11:43 +08001380
Miao Xieaf8e2d12014-10-23 14:42:50 +08001381 for (mirror_index = 0; mirror_index < nmirrors;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001382 mirror_index++) {
1383 struct scrub_block *sblock;
1384 struct scrub_page *page;
1385
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001386 sblock = sblocks_for_recheck + mirror_index;
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001387 sblock->sctx = sctx;
1388 page = kzalloc(sizeof(*page), GFP_NOFS);
1389 if (!page) {
1390leave_nomem:
Stefan Behrensd9d181c2012-11-02 09:58:09 +01001391 spin_lock(&sctx->stat_lock);
1392 sctx->stat.malloc_errors++;
1393 spin_unlock(&sctx->stat_lock);
Miao Xieaf8e2d12014-10-23 14:42:50 +08001394 scrub_put_recover(recover);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001395 return -ENOMEM;
1396 }
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001397 scrub_page_get(page);
1398 sblock->pagev[page_index] = page;
1399 page->logical = logical;
Miao Xieaf8e2d12014-10-23 14:42:50 +08001400
Zhao Lei10f11902015-01-20 15:11:43 +08001401 scrub_stripe_index_and_offset(logical,
1402 bbio->map_type,
1403 bbio->raid_map,
Miao Xieaf8e2d12014-10-23 14:42:50 +08001404 mapped_length,
Zhao Leie34c3302015-01-20 15:11:31 +08001405 bbio->num_stripes -
1406 bbio->num_tgtdevs,
Miao Xieaf8e2d12014-10-23 14:42:50 +08001407 mirror_index,
1408 &stripe_index,
1409 &stripe_offset);
1410 page->physical = bbio->stripes[stripe_index].physical +
1411 stripe_offset;
1412 page->dev = bbio->stripes[stripe_index].dev;
1413
Stefan Behrensff023aa2012-11-06 11:43:11 +01001414 BUG_ON(page_index >= original_sblock->page_count);
1415 page->physical_for_dev_replace =
1416 original_sblock->pagev[page_index]->
1417 physical_for_dev_replace;
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001418 /* for missing devices, dev->bdev is NULL */
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001419 page->mirror_num = mirror_index + 1;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001420 sblock->page_count++;
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001421 page->page = alloc_page(GFP_NOFS);
1422 if (!page->page)
1423 goto leave_nomem;
Miao Xieaf8e2d12014-10-23 14:42:50 +08001424
1425 scrub_get_recover(recover);
1426 page->recover = recover;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001427 }
Miao Xieaf8e2d12014-10-23 14:42:50 +08001428 scrub_put_recover(recover);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001429 length -= sublen;
1430 logical += sublen;
1431 page_index++;
1432 }
1433
1434 return 0;
1435}
1436
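/*
 * Example of what the setup above builds (sizes assumed): rechecking a
 * 16k tree block on a two-way RAID1 chunk issues one btrfs_map_sblock()
 * call per 4k page; each call reports two stripes, so afterwards
 * sblocks_for_recheck[0] and sblocks_for_recheck[1] each hold four
 * scrub_pages describing the same logical range on the two mirrors.
 */
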
Miao Xieaf8e2d12014-10-23 14:42:50 +08001437struct scrub_bio_ret {
1438 struct completion event;
1439 int error;
1440};
1441
1442static void scrub_bio_wait_endio(struct bio *bio, int error)
1443{
1444 struct scrub_bio_ret *ret = bio->bi_private;
1445
1446 ret->error = error;
1447 complete(&ret->event);
1448}
1449
1450static inline int scrub_is_page_on_raid56(struct scrub_page *page)
1451{
Zhao Lei10f11902015-01-20 15:11:43 +08001452 return page->recover &&
Zhao Leiffe2d202015-01-20 15:11:44 +08001453 (page->recover->bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK);
Miao Xieaf8e2d12014-10-23 14:42:50 +08001454}
1455
1456static int scrub_submit_raid56_bio_wait(struct btrfs_fs_info *fs_info,
1457 struct bio *bio,
1458 struct scrub_page *page)
1459{
1460 struct scrub_bio_ret done;
1461 int ret;
1462
1463 init_completion(&done.event);
1464 done.error = 0;
1465 bio->bi_iter.bi_sector = page->logical >> 9;
1466 bio->bi_private = &done;
1467 bio->bi_end_io = scrub_bio_wait_endio;
1468
1469 ret = raid56_parity_recover(fs_info->fs_root, bio, page->recover->bbio,
Miao Xieaf8e2d12014-10-23 14:42:50 +08001470 page->recover->map_length,
Miao Xie42452152014-11-25 16:39:28 +08001471 page->mirror_num, 0);
Miao Xieaf8e2d12014-10-23 14:42:50 +08001472 if (ret)
1473 return ret;
1474
1475 wait_for_completion(&done.event);
1476 if (done.error)
1477 return -EIO;
1478
1479 return 0;
1480}
1481
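/*
 * The helper above is an instance of a common pattern: drive an
 * asynchronous bio synchronously by parking a completion in bi_private
 * and sleeping until the end_io callback fires. A minimal sketch of the
 * same idea for an ordinary (non-RAID56) read might look like this
 * (compiled out, illustrative only; reuses scrub_bio_ret and
 * scrub_bio_wait_endio from above):
 */
#if 0
static int scrub_submit_bio_sync(struct bio *bio)
{
	struct scrub_bio_ret done;

	init_completion(&done.event);
	done.error = 0;
	bio->bi_private = &done;
	bio->bi_end_io = scrub_bio_wait_endio;
	submit_bio(READ, bio);
	wait_for_completion(&done.event);
	return done.error ? -EIO : 0;
}
#endif
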
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001482/*
1483	 * this function will check the on-disk data for checksum errors, header
1484 * errors and read I/O errors. If any I/O errors happen, the exact pages
1485 * which are errored are marked as being bad. The goal is to enable scrub
1486 * to take those pages that are not errored from all the mirrors so that
1487 * the pages that are errored in the just handled mirror can be repaired.
1488 */
Stefan Behrens34f5c8e2012-11-02 16:16:26 +01001489static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
1490 struct scrub_block *sblock, int is_metadata,
1491 int have_csum, u8 *csum, u64 generation,
Miao Xieaf8e2d12014-10-23 14:42:50 +08001492 u16 csum_size, int retry_failed_mirror)
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001493{
1494 int page_num;
1495
1496 sblock->no_io_error_seen = 1;
1497 sblock->header_error = 0;
1498 sblock->checksum_error = 0;
1499
1500 for (page_num = 0; page_num < sblock->page_count; page_num++) {
1501 struct bio *bio;
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001502 struct scrub_page *page = sblock->pagev[page_num];
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001503
Stefan Behrens442a4f62012-05-25 16:06:08 +02001504 if (page->dev->bdev == NULL) {
Stefan Behrensea9947b2012-05-04 15:16:07 -04001505 page->io_error = 1;
1506 sblock->no_io_error_seen = 0;
1507 continue;
1508 }
1509
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001510 WARN_ON(!page->page);
Chris Mason9be33952013-05-17 18:30:14 -04001511 bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
Stefan Behrens34f5c8e2012-11-02 16:16:26 +01001512 if (!bio) {
1513 page->io_error = 1;
1514 sblock->no_io_error_seen = 0;
1515 continue;
1516 }
Stefan Behrens442a4f62012-05-25 16:06:08 +02001517 bio->bi_bdev = page->dev->bdev;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001518
Stefan Behrens34f5c8e2012-11-02 16:16:26 +01001519 bio_add_page(bio, page->page, PAGE_SIZE, 0);
Miao Xieaf8e2d12014-10-23 14:42:50 +08001520 if (!retry_failed_mirror && scrub_is_page_on_raid56(page)) {
1521 if (scrub_submit_raid56_bio_wait(fs_info, bio, page))
1522 sblock->no_io_error_seen = 0;
1523 } else {
1524 bio->bi_iter.bi_sector = page->physical >> 9;
1525
1526 if (btrfsic_submit_bio_wait(READ, bio))
1527 sblock->no_io_error_seen = 0;
1528 }
Kent Overstreet33879d42013-11-23 22:33:32 -08001529
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001530 bio_put(bio);
1531 }
1532
1533 if (sblock->no_io_error_seen)
1534 scrub_recheck_block_checksum(fs_info, sblock, is_metadata,
1535 have_csum, csum, generation,
1536 csum_size);
1537
Stefan Behrens34f5c8e2012-11-02 16:16:26 +01001538 return;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001539}
1540
Miao Xie17a9be22014-07-24 11:37:08 +08001541static inline int scrub_check_fsid(u8 fsid[],
1542 struct scrub_page *spage)
1543{
1544 struct btrfs_fs_devices *fs_devices = spage->dev->fs_devices;
1545 int ret;
1546
1547 ret = memcmp(fsid, fs_devices->fsid, BTRFS_UUID_SIZE);
1548 return !ret;
1549}
1550
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001551static void scrub_recheck_block_checksum(struct btrfs_fs_info *fs_info,
1552 struct scrub_block *sblock,
1553 int is_metadata, int have_csum,
1554 const u8 *csum, u64 generation,
1555 u16 csum_size)
1556{
1557 int page_num;
1558 u8 calculated_csum[BTRFS_CSUM_SIZE];
1559 u32 crc = ~(u32)0;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001560 void *mapped_buffer;
1561
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001562 WARN_ON(!sblock->pagev[0]->page);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001563 if (is_metadata) {
1564 struct btrfs_header *h;
1565
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001566 mapped_buffer = kmap_atomic(sblock->pagev[0]->page);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001567 h = (struct btrfs_header *)mapped_buffer;
1568
Qu Wenruo3cae2102013-07-16 11:19:18 +08001569 if (sblock->pagev[0]->logical != btrfs_stack_header_bytenr(h) ||
Miao Xie17a9be22014-07-24 11:37:08 +08001570 !scrub_check_fsid(h->fsid, sblock->pagev[0]) ||
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001571 memcmp(h->chunk_tree_uuid, fs_info->chunk_tree_uuid,
Stefan Behrens442a4f62012-05-25 16:06:08 +02001572 BTRFS_UUID_SIZE)) {
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001573 sblock->header_error = 1;
Qu Wenruo3cae2102013-07-16 11:19:18 +08001574 } else if (generation != btrfs_stack_header_generation(h)) {
Stefan Behrens442a4f62012-05-25 16:06:08 +02001575 sblock->header_error = 1;
1576 sblock->generation_error = 1;
1577 }
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001578 csum = h->csum;
1579 } else {
1580 if (!have_csum)
1581 return;
1582
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001583 mapped_buffer = kmap_atomic(sblock->pagev[0]->page);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001584 }
1585
1586 for (page_num = 0;;) {
1587 if (page_num == 0 && is_metadata)
Liu Bob0496682013-03-14 14:57:45 +00001588 crc = btrfs_csum_data(
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001589 ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE,
1590 crc, PAGE_SIZE - BTRFS_CSUM_SIZE);
1591 else
Liu Bob0496682013-03-14 14:57:45 +00001592 crc = btrfs_csum_data(mapped_buffer, crc, PAGE_SIZE);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001593
Linus Torvalds9613beb2012-03-30 12:44:29 -07001594 kunmap_atomic(mapped_buffer);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001595 page_num++;
1596 if (page_num >= sblock->page_count)
1597 break;
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001598 WARN_ON(!sblock->pagev[page_num]->page);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001599
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001600 mapped_buffer = kmap_atomic(sblock->pagev[page_num]->page);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001601 }
1602
1603 btrfs_csum_final(crc, calculated_csum);
1604 if (memcmp(calculated_csum, csum, csum_size))
1605 sblock->checksum_error = 1;
1606}
1607
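/*
 * Example for the checksum loop above (sizes assumed): a 16k tree block
 * spread over four 4k pages feeds btrfs_csum_data() with PAGE_SIZE -
 * BTRFS_CSUM_SIZE bytes from page 0 (the on-disk csum field itself is
 * skipped) and a full PAGE_SIZE from each of pages 1-3, accumulating a
 * single crc32c over the whole block before btrfs_csum_final().
 */
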
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001608static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
Zhao Lei114ab502015-01-20 15:11:36 +08001609 struct scrub_block *sblock_good)
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001610{
1611 int page_num;
1612 int ret = 0;
1613
1614 for (page_num = 0; page_num < sblock_bad->page_count; page_num++) {
1615 int ret_sub;
1616
1617 ret_sub = scrub_repair_page_from_good_copy(sblock_bad,
1618 sblock_good,
Zhao Lei114ab502015-01-20 15:11:36 +08001619 page_num, 1);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001620 if (ret_sub)
1621 ret = ret_sub;
1622 }
1623
1624 return ret;
1625}
1626
1627static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
1628 struct scrub_block *sblock_good,
1629 int page_num, int force_write)
1630{
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001631 struct scrub_page *page_bad = sblock_bad->pagev[page_num];
1632 struct scrub_page *page_good = sblock_good->pagev[page_num];
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001633
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001634 BUG_ON(page_bad->page == NULL);
1635 BUG_ON(page_good->page == NULL);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001636 if (force_write || sblock_bad->header_error ||
1637 sblock_bad->checksum_error || page_bad->io_error) {
1638 struct bio *bio;
1639 int ret;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001640
Stefan Behrensff023aa2012-11-06 11:43:11 +01001641 if (!page_bad->dev->bdev) {
Frank Holtonefe120a2013-12-20 11:37:06 -05001642 printk_ratelimited(KERN_WARNING "BTRFS: "
1643 "scrub_repair_page_from_good_copy(bdev == NULL) "
1644 "is unexpected!\n");
Stefan Behrensff023aa2012-11-06 11:43:11 +01001645 return -EIO;
1646 }
1647
Chris Mason9be33952013-05-17 18:30:14 -04001648 bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
Tsutomu Itohe627ee72012-04-12 16:03:56 -04001649 if (!bio)
1650 return -EIO;
Stefan Behrens442a4f62012-05-25 16:06:08 +02001651 bio->bi_bdev = page_bad->dev->bdev;
Kent Overstreet4f024f32013-10-11 15:44:27 -07001652 bio->bi_iter.bi_sector = page_bad->physical >> 9;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001653
1654 ret = bio_add_page(bio, page_good->page, PAGE_SIZE, 0);
1655 if (PAGE_SIZE != ret) {
1656 bio_put(bio);
1657 return -EIO;
1658 }
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001659
Kent Overstreet33879d42013-11-23 22:33:32 -08001660 if (btrfsic_submit_bio_wait(WRITE, bio)) {
Stefan Behrens442a4f62012-05-25 16:06:08 +02001661 btrfs_dev_stat_inc_and_print(page_bad->dev,
1662 BTRFS_DEV_STAT_WRITE_ERRS);
Stefan Behrensff023aa2012-11-06 11:43:11 +01001663 btrfs_dev_replace_stats_inc(
1664 &sblock_bad->sctx->dev_root->fs_info->
1665 dev_replace.num_write_errors);
Stefan Behrens442a4f62012-05-25 16:06:08 +02001666 bio_put(bio);
1667 return -EIO;
1668 }
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001669 bio_put(bio);
1670 }
1671
1672 return 0;
1673}
1674
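/*
 * Note on force_write above: scrub_repair_block_from_good_copy() passes
 * 1 so that every page of the bad mirror is rewritten from the good
 * copy; the page-level path in scrub_handle_errored_block() passes 0,
 * so a page is only rewritten when the block carries a header or
 * checksum error or the page itself saw an I/O error.
 */
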
Stefan Behrensff023aa2012-11-06 11:43:11 +01001675static void scrub_write_block_to_dev_replace(struct scrub_block *sblock)
1676{
1677 int page_num;
1678
Miao Xie5a6ac9e2014-11-06 17:20:58 +08001679 /*
1680	 * This block is used to check the parity on the source device,
1681	 * so the data need not be written to the destination device.
1682 */
1683 if (sblock->sparity)
1684 return;
1685
Stefan Behrensff023aa2012-11-06 11:43:11 +01001686 for (page_num = 0; page_num < sblock->page_count; page_num++) {
1687 int ret;
1688
1689 ret = scrub_write_page_to_dev_replace(sblock, page_num);
1690 if (ret)
1691 btrfs_dev_replace_stats_inc(
1692 &sblock->sctx->dev_root->fs_info->dev_replace.
1693 num_write_errors);
1694 }
1695}
1696
1697static int scrub_write_page_to_dev_replace(struct scrub_block *sblock,
1698 int page_num)
1699{
1700 struct scrub_page *spage = sblock->pagev[page_num];
1701
1702 BUG_ON(spage->page == NULL);
1703 if (spage->io_error) {
1704 void *mapped_buffer = kmap_atomic(spage->page);
1705
1706 memset(mapped_buffer, 0, PAGE_CACHE_SIZE);
1707 flush_dcache_page(spage->page);
1708 kunmap_atomic(mapped_buffer);
1709 }
1710 return scrub_add_page_to_wr_bio(sblock->sctx, spage);
1711}
1712
1713static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
1714 struct scrub_page *spage)
1715{
1716 struct scrub_wr_ctx *wr_ctx = &sctx->wr_ctx;
1717 struct scrub_bio *sbio;
1718 int ret;
1719
1720 mutex_lock(&wr_ctx->wr_lock);
1721again:
1722 if (!wr_ctx->wr_curr_bio) {
1723 wr_ctx->wr_curr_bio = kzalloc(sizeof(*wr_ctx->wr_curr_bio),
1724 GFP_NOFS);
1725 if (!wr_ctx->wr_curr_bio) {
1726 mutex_unlock(&wr_ctx->wr_lock);
1727 return -ENOMEM;
1728 }
1729 wr_ctx->wr_curr_bio->sctx = sctx;
1730 wr_ctx->wr_curr_bio->page_count = 0;
1731 }
1732 sbio = wr_ctx->wr_curr_bio;
1733 if (sbio->page_count == 0) {
1734 struct bio *bio;
1735
1736 sbio->physical = spage->physical_for_dev_replace;
1737 sbio->logical = spage->logical;
1738 sbio->dev = wr_ctx->tgtdev;
1739 bio = sbio->bio;
1740 if (!bio) {
Chris Mason9be33952013-05-17 18:30:14 -04001741 bio = btrfs_io_bio_alloc(GFP_NOFS, wr_ctx->pages_per_wr_bio);
Stefan Behrensff023aa2012-11-06 11:43:11 +01001742 if (!bio) {
1743 mutex_unlock(&wr_ctx->wr_lock);
1744 return -ENOMEM;
1745 }
1746 sbio->bio = bio;
1747 }
1748
1749 bio->bi_private = sbio;
1750 bio->bi_end_io = scrub_wr_bio_end_io;
1751 bio->bi_bdev = sbio->dev->bdev;
Kent Overstreet4f024f32013-10-11 15:44:27 -07001752 bio->bi_iter.bi_sector = sbio->physical >> 9;
Stefan Behrensff023aa2012-11-06 11:43:11 +01001753 sbio->err = 0;
1754 } else if (sbio->physical + sbio->page_count * PAGE_SIZE !=
1755 spage->physical_for_dev_replace ||
1756 sbio->logical + sbio->page_count * PAGE_SIZE !=
1757 spage->logical) {
1758 scrub_wr_submit(sctx);
1759 goto again;
1760 }
1761
1762 ret = bio_add_page(sbio->bio, spage->page, PAGE_SIZE, 0);
1763 if (ret != PAGE_SIZE) {
1764 if (sbio->page_count < 1) {
1765 bio_put(sbio->bio);
1766 sbio->bio = NULL;
1767 mutex_unlock(&wr_ctx->wr_lock);
1768 return -EIO;
1769 }
1770 scrub_wr_submit(sctx);
1771 goto again;
1772 }
1773
1774 sbio->pagev[sbio->page_count] = spage;
1775 scrub_page_get(spage);
1776 sbio->page_count++;
1777 if (sbio->page_count == wr_ctx->pages_per_wr_bio)
1778 scrub_wr_submit(sctx);
1779 mutex_unlock(&wr_ctx->wr_lock);
1780
1781 return 0;
1782}
1783
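/*
 * Illustrative example of the batching above: three pages whose
 * physical_for_dev_replace addresses are contiguous (e.g. 0k, 4k, 8k)
 * are appended to the same write bio; a fourth page that breaks either
 * the physical or the logical contiguity check forces scrub_wr_submit()
 * and the page is retried on a fresh bio via the 'again' label.
 */
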
1784static void scrub_wr_submit(struct scrub_ctx *sctx)
1785{
1786 struct scrub_wr_ctx *wr_ctx = &sctx->wr_ctx;
1787 struct scrub_bio *sbio;
1788
1789 if (!wr_ctx->wr_curr_bio)
1790 return;
1791
1792 sbio = wr_ctx->wr_curr_bio;
1793 wr_ctx->wr_curr_bio = NULL;
1794 WARN_ON(!sbio->bio->bi_bdev);
1795 scrub_pending_bio_inc(sctx);
1796 /* process all writes in a single worker thread. Then the block layer
1797	 * orders the requests before sending them to the driver, which
1798 * doubled the write performance on spinning disks when measured
1799 * with Linux 3.5 */
1800 btrfsic_submit_bio(WRITE, sbio->bio);
1801}
1802
1803static void scrub_wr_bio_end_io(struct bio *bio, int err)
1804{
1805 struct scrub_bio *sbio = bio->bi_private;
1806 struct btrfs_fs_info *fs_info = sbio->dev->dev_root->fs_info;
1807
1808 sbio->err = err;
1809 sbio->bio = bio;
1810
Liu Bo9e0af232014-08-15 23:36:53 +08001811 btrfs_init_work(&sbio->work, btrfs_scrubwrc_helper,
1812 scrub_wr_bio_end_io_worker, NULL, NULL);
Qu Wenruo0339ef22014-02-28 10:46:17 +08001813 btrfs_queue_work(fs_info->scrub_wr_completion_workers, &sbio->work);
Stefan Behrensff023aa2012-11-06 11:43:11 +01001814}
1815
1816static void scrub_wr_bio_end_io_worker(struct btrfs_work *work)
1817{
1818 struct scrub_bio *sbio = container_of(work, struct scrub_bio, work);
1819 struct scrub_ctx *sctx = sbio->sctx;
1820 int i;
1821
1822 WARN_ON(sbio->page_count > SCRUB_PAGES_PER_WR_BIO);
1823 if (sbio->err) {
1824 struct btrfs_dev_replace *dev_replace =
1825 &sbio->sctx->dev_root->fs_info->dev_replace;
1826
1827 for (i = 0; i < sbio->page_count; i++) {
1828 struct scrub_page *spage = sbio->pagev[i];
1829
1830 spage->io_error = 1;
1831 btrfs_dev_replace_stats_inc(&dev_replace->
1832 num_write_errors);
1833 }
1834 }
1835
1836 for (i = 0; i < sbio->page_count; i++)
1837 scrub_page_put(sbio->pagev[i]);
1838
1839 bio_put(sbio->bio);
1840 kfree(sbio);
1841 scrub_pending_bio_dec(sctx);
1842}
1843
1844static int scrub_checksum(struct scrub_block *sblock)
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001845{
1846 u64 flags;
1847 int ret;
1848
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001849 WARN_ON(sblock->page_count < 1);
1850 flags = sblock->pagev[0]->flags;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001851 ret = 0;
1852 if (flags & BTRFS_EXTENT_FLAG_DATA)
1853 ret = scrub_checksum_data(sblock);
1854 else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
1855 ret = scrub_checksum_tree_block(sblock);
1856 else if (flags & BTRFS_EXTENT_FLAG_SUPER)
1857 (void)scrub_checksum_super(sblock);
1858 else
1859 WARN_ON(1);
1860 if (ret)
1861 scrub_handle_errored_block(sblock);
Stefan Behrensff023aa2012-11-06 11:43:11 +01001862
1863 return ret;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001864}
1865
1866static int scrub_checksum_data(struct scrub_block *sblock)
1867{
Stefan Behrensd9d181c2012-11-02 09:58:09 +01001868 struct scrub_ctx *sctx = sblock->sctx;
Arne Jansena2de7332011-03-08 14:14:00 +01001869 u8 csum[BTRFS_CSUM_SIZE];
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001870 u8 *on_disk_csum;
1871 struct page *page;
1872 void *buffer;
Arne Jansena2de7332011-03-08 14:14:00 +01001873 u32 crc = ~(u32)0;
1874 int fail = 0;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001875 u64 len;
1876 int index;
Arne Jansena2de7332011-03-08 14:14:00 +01001877
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001878 BUG_ON(sblock->page_count < 1);
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001879 if (!sblock->pagev[0]->have_csum)
Arne Jansena2de7332011-03-08 14:14:00 +01001880 return 0;
1881
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001882 on_disk_csum = sblock->pagev[0]->csum;
1883 page = sblock->pagev[0]->page;
Linus Torvalds9613beb2012-03-30 12:44:29 -07001884 buffer = kmap_atomic(page);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001885
Stefan Behrensd9d181c2012-11-02 09:58:09 +01001886 len = sctx->sectorsize;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001887 index = 0;
1888 for (;;) {
1889 u64 l = min_t(u64, len, PAGE_SIZE);
1890
Liu Bob0496682013-03-14 14:57:45 +00001891 crc = btrfs_csum_data(buffer, crc, l);
Linus Torvalds9613beb2012-03-30 12:44:29 -07001892 kunmap_atomic(buffer);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001893 len -= l;
1894 if (len == 0)
1895 break;
1896 index++;
1897 BUG_ON(index >= sblock->page_count);
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001898 BUG_ON(!sblock->pagev[index]->page);
1899 page = sblock->pagev[index]->page;
Linus Torvalds9613beb2012-03-30 12:44:29 -07001900 buffer = kmap_atomic(page);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001901 }
1902
Arne Jansena2de7332011-03-08 14:14:00 +01001903 btrfs_csum_final(crc, csum);
Stefan Behrensd9d181c2012-11-02 09:58:09 +01001904 if (memcmp(csum, on_disk_csum, sctx->csum_size))
Arne Jansena2de7332011-03-08 14:14:00 +01001905 fail = 1;
1906
Arne Jansena2de7332011-03-08 14:14:00 +01001907 return fail;
1908}
1909
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001910static int scrub_checksum_tree_block(struct scrub_block *sblock)
Arne Jansena2de7332011-03-08 14:14:00 +01001911{
Stefan Behrensd9d181c2012-11-02 09:58:09 +01001912 struct scrub_ctx *sctx = sblock->sctx;
Arne Jansena2de7332011-03-08 14:14:00 +01001913 struct btrfs_header *h;
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01001914 struct btrfs_root *root = sctx->dev_root;
Arne Jansena2de7332011-03-08 14:14:00 +01001915 struct btrfs_fs_info *fs_info = root->fs_info;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001916 u8 calculated_csum[BTRFS_CSUM_SIZE];
1917 u8 on_disk_csum[BTRFS_CSUM_SIZE];
1918 struct page *page;
1919 void *mapped_buffer;
1920 u64 mapped_size;
1921 void *p;
Arne Jansena2de7332011-03-08 14:14:00 +01001922 u32 crc = ~(u32)0;
1923 int fail = 0;
1924 int crc_fail = 0;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001925 u64 len;
1926 int index;
1927
1928 BUG_ON(sblock->page_count < 1);
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001929 page = sblock->pagev[0]->page;
Linus Torvalds9613beb2012-03-30 12:44:29 -07001930 mapped_buffer = kmap_atomic(page);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001931 h = (struct btrfs_header *)mapped_buffer;
Stefan Behrensd9d181c2012-11-02 09:58:09 +01001932 memcpy(on_disk_csum, h->csum, sctx->csum_size);
Arne Jansena2de7332011-03-08 14:14:00 +01001933
1934 /*
1935	 * we don't use the getter functions here because
1936	 * a) we don't have an extent buffer and
1937	 * b) the page is already kmapped
1938 */
Arne Jansena2de7332011-03-08 14:14:00 +01001939
Qu Wenruo3cae2102013-07-16 11:19:18 +08001940 if (sblock->pagev[0]->logical != btrfs_stack_header_bytenr(h))
Arne Jansena2de7332011-03-08 14:14:00 +01001941 ++fail;
1942
Qu Wenruo3cae2102013-07-16 11:19:18 +08001943 if (sblock->pagev[0]->generation != btrfs_stack_header_generation(h))
Arne Jansena2de7332011-03-08 14:14:00 +01001944 ++fail;
1945
Miao Xie17a9be22014-07-24 11:37:08 +08001946 if (!scrub_check_fsid(h->fsid, sblock->pagev[0]))
Arne Jansena2de7332011-03-08 14:14:00 +01001947 ++fail;
1948
1949 if (memcmp(h->chunk_tree_uuid, fs_info->chunk_tree_uuid,
1950 BTRFS_UUID_SIZE))
1951 ++fail;
1952
Stefan Behrensd9d181c2012-11-02 09:58:09 +01001953 len = sctx->nodesize - BTRFS_CSUM_SIZE;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001954 mapped_size = PAGE_SIZE - BTRFS_CSUM_SIZE;
1955 p = ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE;
1956 index = 0;
1957 for (;;) {
1958 u64 l = min_t(u64, len, mapped_size);
1959
Liu Bob0496682013-03-14 14:57:45 +00001960 crc = btrfs_csum_data(p, crc, l);
Linus Torvalds9613beb2012-03-30 12:44:29 -07001961 kunmap_atomic(mapped_buffer);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001962 len -= l;
1963 if (len == 0)
1964 break;
1965 index++;
1966 BUG_ON(index >= sblock->page_count);
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001967 BUG_ON(!sblock->pagev[index]->page);
1968 page = sblock->pagev[index]->page;
Linus Torvalds9613beb2012-03-30 12:44:29 -07001969 mapped_buffer = kmap_atomic(page);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001970 mapped_size = PAGE_SIZE;
1971 p = mapped_buffer;
1972 }
1973
1974 btrfs_csum_final(crc, calculated_csum);
Stefan Behrensd9d181c2012-11-02 09:58:09 +01001975 if (memcmp(calculated_csum, on_disk_csum, sctx->csum_size))
Arne Jansena2de7332011-03-08 14:14:00 +01001976 ++crc_fail;
1977
Arne Jansena2de7332011-03-08 14:14:00 +01001978 return fail || crc_fail;
1979}
1980
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001981static int scrub_checksum_super(struct scrub_block *sblock)
Arne Jansena2de7332011-03-08 14:14:00 +01001982{
1983 struct btrfs_super_block *s;
Stefan Behrensd9d181c2012-11-02 09:58:09 +01001984 struct scrub_ctx *sctx = sblock->sctx;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001985 u8 calculated_csum[BTRFS_CSUM_SIZE];
1986 u8 on_disk_csum[BTRFS_CSUM_SIZE];
1987 struct page *page;
1988 void *mapped_buffer;
1989 u64 mapped_size;
1990 void *p;
Arne Jansena2de7332011-03-08 14:14:00 +01001991 u32 crc = ~(u32)0;
Stefan Behrens442a4f62012-05-25 16:06:08 +02001992 int fail_gen = 0;
1993 int fail_cor = 0;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001994 u64 len;
1995 int index;
Arne Jansena2de7332011-03-08 14:14:00 +01001996
Stefan Behrensb5d67f62012-03-27 14:21:27 -04001997 BUG_ON(sblock->page_count < 1);
Stefan Behrens7a9e9982012-11-02 14:58:04 +01001998 page = sblock->pagev[0]->page;
Linus Torvalds9613beb2012-03-30 12:44:29 -07001999 mapped_buffer = kmap_atomic(page);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002000 s = (struct btrfs_super_block *)mapped_buffer;
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002001 memcpy(on_disk_csum, s->csum, sctx->csum_size);
Arne Jansena2de7332011-03-08 14:14:00 +01002002
Qu Wenruo3cae2102013-07-16 11:19:18 +08002003 if (sblock->pagev[0]->logical != btrfs_super_bytenr(s))
Stefan Behrens442a4f62012-05-25 16:06:08 +02002004 ++fail_cor;
Arne Jansena2de7332011-03-08 14:14:00 +01002005
Qu Wenruo3cae2102013-07-16 11:19:18 +08002006 if (sblock->pagev[0]->generation != btrfs_super_generation(s))
Stefan Behrens442a4f62012-05-25 16:06:08 +02002007 ++fail_gen;
Arne Jansena2de7332011-03-08 14:14:00 +01002008
Miao Xie17a9be22014-07-24 11:37:08 +08002009 if (!scrub_check_fsid(s->fsid, sblock->pagev[0]))
Stefan Behrens442a4f62012-05-25 16:06:08 +02002010 ++fail_cor;
Arne Jansena2de7332011-03-08 14:14:00 +01002011
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002012 len = BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE;
2013 mapped_size = PAGE_SIZE - BTRFS_CSUM_SIZE;
2014 p = ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE;
2015 index = 0;
2016 for (;;) {
2017 u64 l = min_t(u64, len, mapped_size);
2018
Liu Bob0496682013-03-14 14:57:45 +00002019 crc = btrfs_csum_data(p, crc, l);
Linus Torvalds9613beb2012-03-30 12:44:29 -07002020 kunmap_atomic(mapped_buffer);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002021 len -= l;
2022 if (len == 0)
2023 break;
2024 index++;
2025 BUG_ON(index >= sblock->page_count);
Stefan Behrens7a9e9982012-11-02 14:58:04 +01002026 BUG_ON(!sblock->pagev[index]->page);
2027 page = sblock->pagev[index]->page;
Linus Torvalds9613beb2012-03-30 12:44:29 -07002028 mapped_buffer = kmap_atomic(page);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002029 mapped_size = PAGE_SIZE;
2030 p = mapped_buffer;
2031 }
2032
2033 btrfs_csum_final(crc, calculated_csum);
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002034 if (memcmp(calculated_csum, on_disk_csum, sctx->csum_size))
Stefan Behrens442a4f62012-05-25 16:06:08 +02002035 ++fail_cor;
Arne Jansena2de7332011-03-08 14:14:00 +01002036
Stefan Behrens442a4f62012-05-25 16:06:08 +02002037 if (fail_cor + fail_gen) {
Arne Jansena2de7332011-03-08 14:14:00 +01002038 /*
2039	 * if we find an error in a super block, we just report it.
2040	 * It will get rewritten with the next transaction commit
2041	 * anyway.
2042 */
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002043 spin_lock(&sctx->stat_lock);
2044 ++sctx->stat.super_errors;
2045 spin_unlock(&sctx->stat_lock);
Stefan Behrens442a4f62012-05-25 16:06:08 +02002046 if (fail_cor)
Stefan Behrens7a9e9982012-11-02 14:58:04 +01002047 btrfs_dev_stat_inc_and_print(sblock->pagev[0]->dev,
Stefan Behrens442a4f62012-05-25 16:06:08 +02002048 BTRFS_DEV_STAT_CORRUPTION_ERRS);
2049 else
Stefan Behrens7a9e9982012-11-02 14:58:04 +01002050 btrfs_dev_stat_inc_and_print(sblock->pagev[0]->dev,
Stefan Behrens442a4f62012-05-25 16:06:08 +02002051 BTRFS_DEV_STAT_GENERATION_ERRS);
Arne Jansena2de7332011-03-08 14:14:00 +01002052 }
2053
Stefan Behrens442a4f62012-05-25 16:06:08 +02002054 return fail_cor + fail_gen;
Arne Jansena2de7332011-03-08 14:14:00 +01002055}
2056
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002057static void scrub_block_get(struct scrub_block *sblock)
2058{
Zhao Lei57019342015-01-20 15:11:45 +08002059 atomic_inc(&sblock->refs);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002060}
2061
2062static void scrub_block_put(struct scrub_block *sblock)
2063{
Zhao Lei57019342015-01-20 15:11:45 +08002064 if (atomic_dec_and_test(&sblock->refs)) {
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002065 int i;
2066
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002067 if (sblock->sparity)
2068 scrub_parity_put(sblock->sparity);
2069
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002070 for (i = 0; i < sblock->page_count; i++)
Stefan Behrens7a9e9982012-11-02 14:58:04 +01002071 scrub_page_put(sblock->pagev[i]);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002072 kfree(sblock);
2073 }
2074}
2075
Stefan Behrens7a9e9982012-11-02 14:58:04 +01002076static void scrub_page_get(struct scrub_page *spage)
2077{
Zhao Lei57019342015-01-20 15:11:45 +08002078 atomic_inc(&spage->refs);
Stefan Behrens7a9e9982012-11-02 14:58:04 +01002079}
2080
2081static void scrub_page_put(struct scrub_page *spage)
2082{
Zhao Lei57019342015-01-20 15:11:45 +08002083 if (atomic_dec_and_test(&spage->refs)) {
Stefan Behrens7a9e9982012-11-02 14:58:04 +01002084 if (spage->page)
2085 __free_page(spage->page);
2086 kfree(spage);
2087 }
2088}
2089
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002090static void scrub_submit(struct scrub_ctx *sctx)
Arne Jansena2de7332011-03-08 14:14:00 +01002091{
2092 struct scrub_bio *sbio;
2093
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002094 if (sctx->curr == -1)
Stefan Behrens1623ede2012-03-27 14:21:26 -04002095 return;
Arne Jansena2de7332011-03-08 14:14:00 +01002096
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002097 sbio = sctx->bios[sctx->curr];
2098 sctx->curr = -1;
Stefan Behrensb6bfebc2012-11-02 16:44:58 +01002099 scrub_pending_bio_inc(sctx);
Omar Sandoval03679ad2015-06-19 11:52:48 -07002100 btrfsic_submit_bio(READ, sbio->bio);
Arne Jansena2de7332011-03-08 14:14:00 +01002101}
2102
Stefan Behrensff023aa2012-11-06 11:43:11 +01002103static int scrub_add_page_to_rd_bio(struct scrub_ctx *sctx,
2104 struct scrub_page *spage)
Arne Jansena2de7332011-03-08 14:14:00 +01002105{
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002106 struct scrub_block *sblock = spage->sblock;
Arne Jansena2de7332011-03-08 14:14:00 +01002107 struct scrub_bio *sbio;
Arne Jansen69f4cb52011-11-11 08:17:10 -05002108 int ret;
Arne Jansena2de7332011-03-08 14:14:00 +01002109
2110again:
2111 /*
2112 * grab a fresh bio or wait for one to become available
2113 */
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002114 while (sctx->curr == -1) {
2115 spin_lock(&sctx->list_lock);
2116 sctx->curr = sctx->first_free;
2117 if (sctx->curr != -1) {
2118 sctx->first_free = sctx->bios[sctx->curr]->next_free;
2119 sctx->bios[sctx->curr]->next_free = -1;
2120 sctx->bios[sctx->curr]->page_count = 0;
2121 spin_unlock(&sctx->list_lock);
Arne Jansena2de7332011-03-08 14:14:00 +01002122 } else {
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002123 spin_unlock(&sctx->list_lock);
2124 wait_event(sctx->list_wait, sctx->first_free != -1);
Arne Jansena2de7332011-03-08 14:14:00 +01002125 }
2126 }
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002127 sbio = sctx->bios[sctx->curr];
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002128 if (sbio->page_count == 0) {
Arne Jansen69f4cb52011-11-11 08:17:10 -05002129 struct bio *bio;
2130
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002131 sbio->physical = spage->physical;
2132 sbio->logical = spage->logical;
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01002133 sbio->dev = spage->dev;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002134 bio = sbio->bio;
2135 if (!bio) {
Chris Mason9be33952013-05-17 18:30:14 -04002136 bio = btrfs_io_bio_alloc(GFP_NOFS, sctx->pages_per_rd_bio);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002137 if (!bio)
2138 return -ENOMEM;
2139 sbio->bio = bio;
2140 }
Arne Jansen69f4cb52011-11-11 08:17:10 -05002141
2142 bio->bi_private = sbio;
2143 bio->bi_end_io = scrub_bio_end_io;
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01002144 bio->bi_bdev = sbio->dev->bdev;
Kent Overstreet4f024f32013-10-11 15:44:27 -07002145 bio->bi_iter.bi_sector = sbio->physical >> 9;
Arne Jansen69f4cb52011-11-11 08:17:10 -05002146 sbio->err = 0;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002147 } else if (sbio->physical + sbio->page_count * PAGE_SIZE !=
2148 spage->physical ||
2149 sbio->logical + sbio->page_count * PAGE_SIZE !=
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01002150 spage->logical ||
2151 sbio->dev != spage->dev) {
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002152 scrub_submit(sctx);
Arne Jansen69f4cb52011-11-11 08:17:10 -05002153 goto again;
2154 }
2155
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002156 sbio->pagev[sbio->page_count] = spage;
2157 ret = bio_add_page(sbio->bio, spage->page, PAGE_SIZE, 0);
2158 if (ret != PAGE_SIZE) {
2159 if (sbio->page_count < 1) {
2160 bio_put(sbio->bio);
2161 sbio->bio = NULL;
2162 return -EIO;
2163 }
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002164 scrub_submit(sctx);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002165 goto again;
Arne Jansena2de7332011-03-08 14:14:00 +01002166 }
Arne Jansen1bc87792011-05-28 21:57:55 +02002167
Stefan Behrensff023aa2012-11-06 11:43:11 +01002168 scrub_block_get(sblock); /* one for the page added to the bio */
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002169 atomic_inc(&sblock->outstanding_pages);
2170 sbio->page_count++;
Stefan Behrensff023aa2012-11-06 11:43:11 +01002171 if (sbio->page_count == sctx->pages_per_rd_bio)
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002172 scrub_submit(sctx);
Arne Jansena2de7332011-03-08 14:14:00 +01002173
2174 return 0;
2175}
2176
Omar Sandoval73ff61d2015-06-19 11:52:51 -07002177static void scrub_missing_raid56_end_io(struct bio *bio, int error)
2178{
2179 struct scrub_block *sblock = bio->bi_private;
2180 struct btrfs_fs_info *fs_info = sblock->sctx->dev_root->fs_info;
2181
2182 if (error)
2183 sblock->no_io_error_seen = 0;
2184
2185 btrfs_queue_work(fs_info->scrub_workers, &sblock->work);
2186}
2187
2188static void scrub_missing_raid56_worker(struct btrfs_work *work)
2189{
2190 struct scrub_block *sblock = container_of(work, struct scrub_block, work);
2191 struct scrub_ctx *sctx = sblock->sctx;
2192 struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
2193 unsigned int is_metadata;
2194 unsigned int have_csum;
2195 u8 *csum;
2196 u64 generation;
2197 u64 logical;
2198 struct btrfs_device *dev;
2199
2200 is_metadata = !(sblock->pagev[0]->flags & BTRFS_EXTENT_FLAG_DATA);
2201 have_csum = sblock->pagev[0]->have_csum;
2202 csum = sblock->pagev[0]->csum;
2203 generation = sblock->pagev[0]->generation;
2204 logical = sblock->pagev[0]->logical;
2205 dev = sblock->pagev[0]->dev;
2206
2207 if (sblock->no_io_error_seen) {
2208 scrub_recheck_block_checksum(fs_info, sblock, is_metadata,
2209 have_csum, csum, generation,
2210 sctx->csum_size);
2211 }
2212
2213 if (!sblock->no_io_error_seen) {
2214 spin_lock(&sctx->stat_lock);
2215 sctx->stat.read_errors++;
2216 spin_unlock(&sctx->stat_lock);
2217 printk_ratelimited_in_rcu(KERN_ERR
2218 "BTRFS: I/O error rebulding logical %llu for dev %s\n",
2219 logical, rcu_str_deref(dev->name));
2220 } else if (sblock->header_error || sblock->checksum_error) {
2221 spin_lock(&sctx->stat_lock);
2222 sctx->stat.uncorrectable_errors++;
2223 spin_unlock(&sctx->stat_lock);
2224 printk_ratelimited_in_rcu(KERN_ERR
2225 "BTRFS: failed to rebuild valid logical %llu for dev %s\n",
2226 logical, rcu_str_deref(dev->name));
2227 } else {
2228 scrub_write_block_to_dev_replace(sblock);
2229 }
2230
2231 scrub_block_put(sblock);
2232
2233 if (sctx->is_dev_replace &&
2234 atomic_read(&sctx->wr_ctx.flush_all_writes)) {
2235 mutex_lock(&sctx->wr_ctx.wr_lock);
2236 scrub_wr_submit(sctx);
2237 mutex_unlock(&sctx->wr_ctx.wr_lock);
2238 }
2239
2240 scrub_pending_bio_dec(sctx);
2241}
2242
2243static void scrub_missing_raid56_pages(struct scrub_block *sblock)
2244{
2245 struct scrub_ctx *sctx = sblock->sctx;
2246 struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
2247 u64 length = sblock->page_count * PAGE_SIZE;
2248 u64 logical = sblock->pagev[0]->logical;
2249 struct btrfs_bio *bbio;
2250 struct bio *bio;
2251 struct btrfs_raid_bio *rbio;
2252 int ret;
2253 int i;
2254
2255 ret = btrfs_map_sblock(fs_info, REQ_GET_READ_MIRRORS, logical, &length,
2256 &bbio, 0, 1);
2257 if (ret || !bbio || !bbio->raid_map)
2258 goto bbio_out;
2259
2260 if (WARN_ON(!sctx->is_dev_replace ||
2261 !(bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK))) {
2262 /*
2263 * We shouldn't be scrubbing a missing device. Even for dev
2264 * replace, we should only get here for RAID 5/6. We either
2265 * managed to mount something with no mirrors remaining or
2266 * there's a bug in scrub_remap_extent()/btrfs_map_block().
2267 */
2268 goto bbio_out;
2269 }
2270
2271 bio = btrfs_io_bio_alloc(GFP_NOFS, 0);
2272 if (!bio)
2273 goto bbio_out;
2274
2275 bio->bi_iter.bi_sector = logical >> 9;
2276 bio->bi_private = sblock;
2277 bio->bi_end_io = scrub_missing_raid56_end_io;
2278
2279 rbio = raid56_alloc_missing_rbio(sctx->dev_root, bio, bbio, length);
2280 if (!rbio)
2281 goto rbio_out;
2282
2283 for (i = 0; i < sblock->page_count; i++) {
2284 struct scrub_page *spage = sblock->pagev[i];
2285
2286 raid56_add_scrub_pages(rbio, spage->page, spage->logical);
2287 }
2288
2289 btrfs_init_work(&sblock->work, btrfs_scrub_helper,
2290 scrub_missing_raid56_worker, NULL, NULL);
2291 scrub_block_get(sblock);
2292 scrub_pending_bio_inc(sctx);
2293 raid56_submit_missing_rbio(rbio);
2294 return;
2295
2296rbio_out:
2297 bio_put(bio);
2298bbio_out:
2299 btrfs_put_bbio(bbio);
2300 spin_lock(&sctx->stat_lock);
2301 sctx->stat.malloc_errors++;
2302 spin_unlock(&sctx->stat_lock);
2303}
2304
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002305static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01002306 u64 physical, struct btrfs_device *dev, u64 flags,
Stefan Behrensff023aa2012-11-06 11:43:11 +01002307 u64 gen, int mirror_num, u8 *csum, int force,
2308 u64 physical_for_dev_replace)
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002309{
2310 struct scrub_block *sblock;
2311 int index;
2312
2313 sblock = kzalloc(sizeof(*sblock), GFP_NOFS);
2314 if (!sblock) {
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002315 spin_lock(&sctx->stat_lock);
2316 sctx->stat.malloc_errors++;
2317 spin_unlock(&sctx->stat_lock);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002318 return -ENOMEM;
2319 }
2320
Stefan Behrens7a9e9982012-11-02 14:58:04 +01002321 /* one ref inside this function, plus one for each page added to
2322 * a bio later on */
Zhao Lei57019342015-01-20 15:11:45 +08002323 atomic_set(&sblock->refs, 1);
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002324 sblock->sctx = sctx;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002325 sblock->no_io_error_seen = 1;
2326
2327 for (index = 0; len > 0; index++) {
Stefan Behrens7a9e9982012-11-02 14:58:04 +01002328 struct scrub_page *spage;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002329 u64 l = min_t(u64, len, PAGE_SIZE);
2330
Stefan Behrens7a9e9982012-11-02 14:58:04 +01002331 spage = kzalloc(sizeof(*spage), GFP_NOFS);
2332 if (!spage) {
2333leave_nomem:
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002334 spin_lock(&sctx->stat_lock);
2335 sctx->stat.malloc_errors++;
2336 spin_unlock(&sctx->stat_lock);
Stefan Behrens7a9e9982012-11-02 14:58:04 +01002337 scrub_block_put(sblock);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002338 return -ENOMEM;
2339 }
Stefan Behrens7a9e9982012-11-02 14:58:04 +01002340 BUG_ON(index >= SCRUB_MAX_PAGES_PER_BLOCK);
2341 scrub_page_get(spage);
2342 sblock->pagev[index] = spage;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002343 spage->sblock = sblock;
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01002344 spage->dev = dev;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002345 spage->flags = flags;
2346 spage->generation = gen;
2347 spage->logical = logical;
2348 spage->physical = physical;
Stefan Behrensff023aa2012-11-06 11:43:11 +01002349 spage->physical_for_dev_replace = physical_for_dev_replace;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002350 spage->mirror_num = mirror_num;
2351 if (csum) {
2352 spage->have_csum = 1;
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002353 memcpy(spage->csum, csum, sctx->csum_size);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002354 } else {
2355 spage->have_csum = 0;
2356 }
2357 sblock->page_count++;
Stefan Behrens7a9e9982012-11-02 14:58:04 +01002358 spage->page = alloc_page(GFP_NOFS);
2359 if (!spage->page)
2360 goto leave_nomem;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002361 len -= l;
2362 logical += l;
2363 physical += l;
Stefan Behrensff023aa2012-11-06 11:43:11 +01002364 physical_for_dev_replace += l;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002365 }
2366
Stefan Behrens7a9e9982012-11-02 14:58:04 +01002367 WARN_ON(sblock->page_count == 0);
Omar Sandoval73ff61d2015-06-19 11:52:51 -07002368 if (dev->missing) {
2369 /*
2370 * This case should only be hit for RAID 5/6 device replace. See
2371 * the comment in scrub_missing_raid56_pages() for details.
2372 */
2373 scrub_missing_raid56_pages(sblock);
2374 } else {
2375 for (index = 0; index < sblock->page_count; index++) {
2376 struct scrub_page *spage = sblock->pagev[index];
2377 int ret;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002378
Omar Sandoval73ff61d2015-06-19 11:52:51 -07002379 ret = scrub_add_page_to_rd_bio(sctx, spage);
2380 if (ret) {
2381 scrub_block_put(sblock);
2382 return ret;
2383 }
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002384 }
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002385
Omar Sandoval73ff61d2015-06-19 11:52:51 -07002386 if (force)
2387 scrub_submit(sctx);
2388 }
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002389
2390 /* last one frees, either here or in bio completion for last page */
2391 scrub_block_put(sblock);
2392 return 0;
2393}
2394
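/*
 * Example of how blocks are assembled above (sizes assumed): with 4k
 * pages, a 16k tree block arrives as len == 16384 and becomes one
 * scrub_block holding four scrub_pages, while a 4k data sector yields a
 * single-page block. Each page added to a read bio takes a block ref,
 * so the scrub_block_put() at the end only frees the block if no read
 * is still in flight.
 */
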
2395static void scrub_bio_end_io(struct bio *bio, int err)
2396{
2397 struct scrub_bio *sbio = bio->bi_private;
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01002398 struct btrfs_fs_info *fs_info = sbio->dev->dev_root->fs_info;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002399
2400 sbio->err = err;
2401 sbio->bio = bio;
2402
Qu Wenruo0339ef22014-02-28 10:46:17 +08002403 btrfs_queue_work(fs_info->scrub_workers, &sbio->work);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002404}
2405
2406static void scrub_bio_end_io_worker(struct btrfs_work *work)
2407{
2408 struct scrub_bio *sbio = container_of(work, struct scrub_bio, work);
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002409 struct scrub_ctx *sctx = sbio->sctx;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002410 int i;
2411
Stefan Behrensff023aa2012-11-06 11:43:11 +01002412 BUG_ON(sbio->page_count > SCRUB_PAGES_PER_RD_BIO);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002413 if (sbio->err) {
2414 for (i = 0; i < sbio->page_count; i++) {
2415 struct scrub_page *spage = sbio->pagev[i];
2416
2417 spage->io_error = 1;
2418 spage->sblock->no_io_error_seen = 0;
2419 }
2420 }
2421
2422 /* now complete the scrub_block items that have all pages completed */
2423 for (i = 0; i < sbio->page_count; i++) {
2424 struct scrub_page *spage = sbio->pagev[i];
2425 struct scrub_block *sblock = spage->sblock;
2426
2427 if (atomic_dec_and_test(&sblock->outstanding_pages))
2428 scrub_block_complete(sblock);
2429 scrub_block_put(sblock);
2430 }
2431
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002432 bio_put(sbio->bio);
2433 sbio->bio = NULL;
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002434 spin_lock(&sctx->list_lock);
2435 sbio->next_free = sctx->first_free;
2436 sctx->first_free = sbio->index;
2437 spin_unlock(&sctx->list_lock);
Stefan Behrensff023aa2012-11-06 11:43:11 +01002438
2439 if (sctx->is_dev_replace &&
2440 atomic_read(&sctx->wr_ctx.flush_all_writes)) {
2441 mutex_lock(&sctx->wr_ctx.wr_lock);
2442 scrub_wr_submit(sctx);
2443 mutex_unlock(&sctx->wr_ctx.wr_lock);
2444 }
2445
Stefan Behrensb6bfebc2012-11-02 16:44:58 +01002446 scrub_pending_bio_dec(sctx);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002447}
2448
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002449static inline void __scrub_mark_bitmap(struct scrub_parity *sparity,
2450 unsigned long *bitmap,
2451 u64 start, u64 len)
2452{
David Sterba9d644a62015-02-20 18:42:11 +01002453 u32 offset;
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002454 int nsectors;
2455 int sectorsize = sparity->sctx->dev_root->sectorsize;
2456
2457 if (len >= sparity->stripe_len) {
2458 bitmap_set(bitmap, 0, sparity->nsectors);
2459 return;
2460 }
2461
2462 start -= sparity->logic_start;
David Sterba47c57132015-02-20 18:43:47 +01002463 start = div_u64_rem(start, sparity->stripe_len, &offset);
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002464 offset /= sectorsize;
2465 nsectors = (int)len / sectorsize;
2466
2467 if (offset + nsectors <= sparity->nsectors) {
2468 bitmap_set(bitmap, offset, nsectors);
2469 return;
2470 }
2471
2472 bitmap_set(bitmap, offset, sparity->nsectors - offset);
2473 bitmap_set(bitmap, 0, nsectors - (sparity->nsectors - offset));
2474}
2475
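/*
 * Worked example for __scrub_mark_bitmap() (values assumed): with a 64k
 * stripe_len and 4k sectors (sparity->nsectors == 16), logic_start 0 and
 * a range of start == 60k, len == 8k, the remainder math yields
 * offset == 15 and a two-sector count; since 15 + 2 > 16 the range
 * wraps around the stripe and bits 15 and 0 are set.
 */
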
2476static inline void scrub_parity_mark_sectors_error(struct scrub_parity *sparity,
2477 u64 start, u64 len)
2478{
2479 __scrub_mark_bitmap(sparity, sparity->ebitmap, start, len);
2480}
2481
2482static inline void scrub_parity_mark_sectors_data(struct scrub_parity *sparity,
2483 u64 start, u64 len)
2484{
2485 __scrub_mark_bitmap(sparity, sparity->dbitmap, start, len);
2486}
2487
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002488static void scrub_block_complete(struct scrub_block *sblock)
2489{
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002490 int corrupted = 0;
2491
Stefan Behrensff023aa2012-11-06 11:43:11 +01002492 if (!sblock->no_io_error_seen) {
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002493 corrupted = 1;
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002494 scrub_handle_errored_block(sblock);
Stefan Behrensff023aa2012-11-06 11:43:11 +01002495 } else {
2496 /*
2497 * if has checksum error, write via repair mechanism in
2498 * dev replace case, otherwise write here in dev replace
2499 * case.
2500 */
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002501 corrupted = scrub_checksum(sblock);
2502 if (!corrupted && sblock->sctx->is_dev_replace)
Stefan Behrensff023aa2012-11-06 11:43:11 +01002503 scrub_write_block_to_dev_replace(sblock);
2504 }
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002505
2506 if (sblock->sparity && corrupted && !sblock->data_corrected) {
2507 u64 start = sblock->pagev[0]->logical;
2508 u64 end = sblock->pagev[sblock->page_count - 1]->logical +
2509 PAGE_SIZE;
2510
2511 scrub_parity_mark_sectors_error(sblock->sparity,
2512 start, end - start);
2513 }
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002514}
2515
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002516static int scrub_find_csum(struct scrub_ctx *sctx, u64 logical, u64 len,
Arne Jansena2de7332011-03-08 14:14:00 +01002517 u8 *csum)
2518{
2519 struct btrfs_ordered_sum *sum = NULL;
Miao Xief51a4a12013-06-19 10:36:09 +08002520 unsigned long index;
Arne Jansena2de7332011-03-08 14:14:00 +01002521 unsigned long num_sectors;
Arne Jansena2de7332011-03-08 14:14:00 +01002522
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002523 while (!list_empty(&sctx->csum_list)) {
2524 sum = list_first_entry(&sctx->csum_list,
Arne Jansena2de7332011-03-08 14:14:00 +01002525 struct btrfs_ordered_sum, list);
2526 if (sum->bytenr > logical)
2527 return 0;
2528 if (sum->bytenr + sum->len > logical)
2529 break;
2530
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002531 ++sctx->stat.csum_discards;
Arne Jansena2de7332011-03-08 14:14:00 +01002532 list_del(&sum->list);
2533 kfree(sum);
2534 sum = NULL;
2535 }
2536 if (!sum)
2537 return 0;
2538
Miao Xief51a4a12013-06-19 10:36:09 +08002539 index = ((u32)(logical - sum->bytenr)) / sctx->sectorsize;
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002540 num_sectors = sum->len / sctx->sectorsize;
Miao Xief51a4a12013-06-19 10:36:09 +08002541 memcpy(csum, sum->sums + index, sctx->csum_size);
2542 if (index == num_sectors - 1) {
Arne Jansena2de7332011-03-08 14:14:00 +01002543 list_del(&sum->list);
2544 kfree(sum);
2545 }
Miao Xief51a4a12013-06-19 10:36:09 +08002546 return 1;
Arne Jansena2de7332011-03-08 14:14:00 +01002547}
2548
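/*
 * Example lookup (values assumed): with a 4k sectorsize and an ordered
 * sum entry covering bytenr 1M..1M+64k, a call for logical == 1M + 8k
 * computes index == 2 and copies the checksum of the third sector into
 * 'csum'. Entries that end at or before 'logical' are consumed, counted
 * in csum_discards and freed on the way.
 */
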
2549/* scrub extent tries to collect up to 64 kB for each bio */
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002550static int scrub_extent(struct scrub_ctx *sctx, u64 logical, u64 len,
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01002551 u64 physical, struct btrfs_device *dev, u64 flags,
Stefan Behrensff023aa2012-11-06 11:43:11 +01002552 u64 gen, int mirror_num, u64 physical_for_dev_replace)
Arne Jansena2de7332011-03-08 14:14:00 +01002553{
2554 int ret;
2555 u8 csum[BTRFS_CSUM_SIZE];
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002556 u32 blocksize;
2557
2558 if (flags & BTRFS_EXTENT_FLAG_DATA) {
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002559 blocksize = sctx->sectorsize;
2560 spin_lock(&sctx->stat_lock);
2561 sctx->stat.data_extents_scrubbed++;
2562 sctx->stat.data_bytes_scrubbed += len;
2563 spin_unlock(&sctx->stat_lock);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002564 } else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002565 blocksize = sctx->nodesize;
2566 spin_lock(&sctx->stat_lock);
2567 sctx->stat.tree_extents_scrubbed++;
2568 sctx->stat.tree_bytes_scrubbed += len;
2569 spin_unlock(&sctx->stat_lock);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002570 } else {
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002571 blocksize = sctx->sectorsize;
Stefan Behrensff023aa2012-11-06 11:43:11 +01002572 WARN_ON(1);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002573 }
Arne Jansena2de7332011-03-08 14:14:00 +01002574
2575 while (len) {
Stefan Behrensb5d67f62012-03-27 14:21:27 -04002576 u64 l = min_t(u64, len, blocksize);
Arne Jansena2de7332011-03-08 14:14:00 +01002577 int have_csum = 0;
2578
2579 if (flags & BTRFS_EXTENT_FLAG_DATA) {
2580 /* push csums to sbio */
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002581 have_csum = scrub_find_csum(sctx, logical, l, csum);
Arne Jansena2de7332011-03-08 14:14:00 +01002582 if (have_csum == 0)
Stefan Behrensd9d181c2012-11-02 09:58:09 +01002583 ++sctx->stat.no_csum;
Stefan Behrensff023aa2012-11-06 11:43:11 +01002584 if (sctx->is_dev_replace && !have_csum) {
2585 ret = copy_nocow_pages(sctx, logical, l,
2586 mirror_num,
2587 physical_for_dev_replace);
2588 goto behind_scrub_pages;
2589 }
Arne Jansena2de7332011-03-08 14:14:00 +01002590 }
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01002591 ret = scrub_pages(sctx, logical, l, physical, dev, flags, gen,
Stefan Behrensff023aa2012-11-06 11:43:11 +01002592 mirror_num, have_csum ? csum : NULL, 0,
2593 physical_for_dev_replace);
2594behind_scrub_pages:
Arne Jansena2de7332011-03-08 14:14:00 +01002595 if (ret)
2596 return ret;
2597 len -= l;
2598 logical += l;
2599 physical += l;
Stefan Behrensff023aa2012-11-06 11:43:11 +01002600 physical_for_dev_replace += l;
Arne Jansena2de7332011-03-08 14:14:00 +01002601 }
2602 return 0;
2603}
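/*
 * Illustrative walk-through of scrub_extent() (made-up numbers): a 20K
 * data extent on a 4K-sectorsize filesystem is split into five
 * sector-sized blocks; each iteration looks up the csum for one block,
 * queues it via scrub_pages() and advances logical, physical and
 * physical_for_dev_replace by 4K. Tree blocks take nodesize-sized
 * steps instead.
 */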
2604
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002605static int scrub_pages_for_parity(struct scrub_parity *sparity,
2606 u64 logical, u64 len,
2607 u64 physical, struct btrfs_device *dev,
2608 u64 flags, u64 gen, int mirror_num, u8 *csum)
2609{
2610 struct scrub_ctx *sctx = sparity->sctx;
2611 struct scrub_block *sblock;
2612 int index;
2613
2614 sblock = kzalloc(sizeof(*sblock), GFP_NOFS);
2615 if (!sblock) {
2616 spin_lock(&sctx->stat_lock);
2617 sctx->stat.malloc_errors++;
2618 spin_unlock(&sctx->stat_lock);
2619 return -ENOMEM;
2620 }
2621
2622 /* one ref inside this function, plus one for each page added to
2623 * a bio later on */
Zhao Lei57019342015-01-20 15:11:45 +08002624 atomic_set(&sblock->refs, 1);
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002625 sblock->sctx = sctx;
2626 sblock->no_io_error_seen = 1;
2627 sblock->sparity = sparity;
2628 scrub_parity_get(sparity);
2629
2630 for (index = 0; len > 0; index++) {
2631 struct scrub_page *spage;
2632 u64 l = min_t(u64, len, PAGE_SIZE);
2633
2634 spage = kzalloc(sizeof(*spage), GFP_NOFS);
2635 if (!spage) {
2636leave_nomem:
2637 spin_lock(&sctx->stat_lock);
2638 sctx->stat.malloc_errors++;
2639 spin_unlock(&sctx->stat_lock);
2640 scrub_block_put(sblock);
2641 return -ENOMEM;
2642 }
2643 BUG_ON(index >= SCRUB_MAX_PAGES_PER_BLOCK);
2644 /* For scrub block */
2645 scrub_page_get(spage);
2646 sblock->pagev[index] = spage;
2647 /* For scrub parity */
2648 scrub_page_get(spage);
2649 list_add_tail(&spage->list, &sparity->spages);
2650 spage->sblock = sblock;
2651 spage->dev = dev;
2652 spage->flags = flags;
2653 spage->generation = gen;
2654 spage->logical = logical;
2655 spage->physical = physical;
2656 spage->mirror_num = mirror_num;
2657 if (csum) {
2658 spage->have_csum = 1;
2659 memcpy(spage->csum, csum, sctx->csum_size);
2660 } else {
2661 spage->have_csum = 0;
2662 }
2663 sblock->page_count++;
2664 spage->page = alloc_page(GFP_NOFS);
2665 if (!spage->page)
2666 goto leave_nomem;
2667 len -= l;
2668 logical += l;
2669 physical += l;
2670 }
2671
2672 WARN_ON(sblock->page_count == 0);
2673 for (index = 0; index < sblock->page_count; index++) {
2674 struct scrub_page *spage = sblock->pagev[index];
2675 int ret;
2676
2677 ret = scrub_add_page_to_rd_bio(sctx, spage);
2678 if (ret) {
2679 scrub_block_put(sblock);
2680 return ret;
2681 }
2682 }
2683
2684 /* last one frees, either here or in bio completion for last page */
2685 scrub_block_put(sblock);
2686 return 0;
2687}
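/*
 * Reference counting recap for scrub_pages_for_parity(): each page is
 * grabbed twice, once for sblock->pagev[] and once for sparity->spages,
 * so it survives until both the block and the parity run have dropped
 * it. The block itself starts at refcount 1 here;
 * scrub_add_page_to_rd_bio() takes additional references for in-flight
 * bios, and the final scrub_block_put() drops the local one.
 */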
2688
2689static int scrub_extent_for_parity(struct scrub_parity *sparity,
2690 u64 logical, u64 len,
2691 u64 physical, struct btrfs_device *dev,
2692 u64 flags, u64 gen, int mirror_num)
2693{
2694 struct scrub_ctx *sctx = sparity->sctx;
2695 int ret;
2696 u8 csum[BTRFS_CSUM_SIZE];
2697 u32 blocksize;
2698
Omar Sandoval4a770892015-06-19 11:52:52 -07002699 if (dev->missing) {
2700 scrub_parity_mark_sectors_error(sparity, logical, len);
2701 return 0;
2702 }
2703
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002704 if (flags & BTRFS_EXTENT_FLAG_DATA) {
2705 blocksize = sctx->sectorsize;
2706 } else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
2707 blocksize = sctx->nodesize;
2708 } else {
2709 blocksize = sctx->sectorsize;
2710 WARN_ON(1);
2711 }
2712
2713 while (len) {
2714 u64 l = min_t(u64, len, blocksize);
2715 int have_csum = 0;
2716
2717 if (flags & BTRFS_EXTENT_FLAG_DATA) {
2718 /* push csums to sbio */
2719 have_csum = scrub_find_csum(sctx, logical, l, csum);
2720 if (have_csum == 0)
2721 goto skip;
2722 }
2723 ret = scrub_pages_for_parity(sparity, logical, l, physical, dev,
2724 flags, gen, mirror_num,
2725 have_csum ? csum : NULL);
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002726 if (ret)
2727 return ret;
Dan Carpenter6b6d24b2014-12-12 22:30:00 +03002728skip:
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002729 len -= l;
2730 logical += l;
2731 physical += l;
2732 }
2733 return 0;
2734}
2735
Wang Shilong3b080b22014-04-01 18:01:43 +08002736/*
2737 * Given a physical address, this will calculate its
2738 * logical offset. If this is a parity stripe, it will return
2739 * the left-most data stripe's logical offset.
2740 *
2741 * Return 0 if it is a data stripe, 1 if it is a parity stripe.
2742 */
2743static int get_raid56_logic_offset(u64 physical, int num,
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002744 struct map_lookup *map, u64 *offset,
2745 u64 *stripe_start)
Wang Shilong3b080b22014-04-01 18:01:43 +08002746{
2747 int i;
2748 int j = 0;
2749 u64 stripe_nr;
2750 u64 last_offset;
David Sterba9d644a62015-02-20 18:42:11 +01002751 u32 stripe_index;
2752 u32 rot;
Wang Shilong3b080b22014-04-01 18:01:43 +08002753
2754 last_offset = (physical - map->stripes[num].physical) *
2755 nr_data_stripes(map);
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002756 if (stripe_start)
2757 *stripe_start = last_offset;
2758
Wang Shilong3b080b22014-04-01 18:01:43 +08002759 *offset = last_offset;
2760 for (i = 0; i < nr_data_stripes(map); i++) {
2761 *offset = last_offset + i * map->stripe_len;
2762
David Sterbab8b93ad2015-01-16 17:26:13 +01002763 stripe_nr = div_u64(*offset, map->stripe_len);
2764 stripe_nr = div_u64(stripe_nr, nr_data_stripes(map));
Wang Shilong3b080b22014-04-01 18:01:43 +08002765
2766 /* Work out the disk rotation on this stripe-set */
David Sterba47c57132015-02-20 18:43:47 +01002767 stripe_nr = div_u64_rem(stripe_nr, map->num_stripes, &rot);
Wang Shilong3b080b22014-04-01 18:01:43 +08002768		/* calculate which stripe this data is located on */
2769 rot += i;
Wang Shilonge4fbaee2014-04-11 18:32:25 +08002770 stripe_index = rot % map->num_stripes;
Wang Shilong3b080b22014-04-01 18:01:43 +08002771 if (stripe_index == num)
2772 return 0;
2773 if (stripe_index < num)
2774 j++;
2775 }
2776 *offset = last_offset + j * map->stripe_len;
2777 return 1;
2778}
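/*
 * Worked example of the rotation math above, assuming a three-device
 * RAID5 (nr_data_stripes == 2), a 64K stripe_len and num == 2, with
 * physical pointing 64K into the device extent:
 *
 *   last_offset = 64K * 2 = 128K (logical start of full stripe row 1)
 *
 *   i = 0: *offset = 128K, stripe_nr = 128K / 64K / 2 = 1,
 *          rot = 1 % 3 = 1, stripe_index = (1 + 0) % 3 = 1 != num
 *   i = 1: *offset = 192K, stripe_nr = 192K / 64K / 2 = 1,
 *          rot = 1,         stripe_index = (1 + 1) % 3 = 2 == num
 *          -> data stripe, return 0 with *offset = 192K
 *
 * Had no i matched, this physical stripe would hold parity and
 * *stripe_start would carry last_offset, the logical address of the
 * left-most data stripe of that full stripe.
 */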
2779
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002780static void scrub_free_parity(struct scrub_parity *sparity)
2781{
2782 struct scrub_ctx *sctx = sparity->sctx;
2783 struct scrub_page *curr, *next;
2784 int nbits;
2785
2786 nbits = bitmap_weight(sparity->ebitmap, sparity->nsectors);
2787 if (nbits) {
2788 spin_lock(&sctx->stat_lock);
2789 sctx->stat.read_errors += nbits;
2790 sctx->stat.uncorrectable_errors += nbits;
2791 spin_unlock(&sctx->stat_lock);
2792 }
2793
2794 list_for_each_entry_safe(curr, next, &sparity->spages, list) {
2795 list_del_init(&curr->list);
2796 scrub_page_put(curr);
2797 }
2798
2799 kfree(sparity);
2800}
2801
Zhao Lei20b2e302015-06-04 20:09:15 +08002802static void scrub_parity_bio_endio_worker(struct btrfs_work *work)
2803{
2804 struct scrub_parity *sparity = container_of(work, struct scrub_parity,
2805 work);
2806 struct scrub_ctx *sctx = sparity->sctx;
2807
2808 scrub_free_parity(sparity);
2809 scrub_pending_bio_dec(sctx);
2810}
2811
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002812static void scrub_parity_bio_endio(struct bio *bio, int error)
2813{
2814 struct scrub_parity *sparity = (struct scrub_parity *)bio->bi_private;
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002815
2816 if (error)
2817 bitmap_or(sparity->ebitmap, sparity->ebitmap, sparity->dbitmap,
2818 sparity->nsectors);
2819
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002820 bio_put(bio);
Zhao Lei20b2e302015-06-04 20:09:15 +08002821
2822 btrfs_init_work(&sparity->work, btrfs_scrubparity_helper,
2823 scrub_parity_bio_endio_worker, NULL, NULL);
2824 btrfs_queue_work(sparity->sctx->dev_root->fs_info->scrub_parity_workers,
2825 &sparity->work);
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002826}
2827
2828static void scrub_parity_check_and_repair(struct scrub_parity *sparity)
2829{
2830 struct scrub_ctx *sctx = sparity->sctx;
2831 struct bio *bio;
2832 struct btrfs_raid_bio *rbio;
2833 struct scrub_page *spage;
2834 struct btrfs_bio *bbio = NULL;
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002835 u64 length;
2836 int ret;
2837
2838 if (!bitmap_andnot(sparity->dbitmap, sparity->dbitmap, sparity->ebitmap,
2839 sparity->nsectors))
2840 goto out;
2841
Zhao Leia0dd59d2015-07-21 15:42:26 +08002842 length = sparity->logic_end - sparity->logic_start;
Miao Xie76035972014-11-14 17:45:42 +08002843 ret = btrfs_map_sblock(sctx->dev_root->fs_info, WRITE,
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002844 sparity->logic_start,
Zhao Lei8e5cfb52015-01-20 15:11:33 +08002845 &length, &bbio, 0, 1);
2846 if (ret || !bbio || !bbio->raid_map)
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002847 goto bbio_out;
2848
2849 bio = btrfs_io_bio_alloc(GFP_NOFS, 0);
2850 if (!bio)
2851 goto bbio_out;
2852
2853 bio->bi_iter.bi_sector = sparity->logic_start >> 9;
2854 bio->bi_private = sparity;
2855 bio->bi_end_io = scrub_parity_bio_endio;
2856
2857 rbio = raid56_parity_alloc_scrub_rbio(sctx->dev_root, bio, bbio,
Zhao Lei8e5cfb52015-01-20 15:11:33 +08002858 length, sparity->scrub_dev,
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002859 sparity->dbitmap,
2860 sparity->nsectors);
2861 if (!rbio)
2862 goto rbio_out;
2863
2864 list_for_each_entry(spage, &sparity->spages, list)
Omar Sandovalb4ee1782015-06-19 11:52:50 -07002865 raid56_add_scrub_pages(rbio, spage->page, spage->logical);
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002866
2867 scrub_pending_bio_inc(sctx);
2868 raid56_parity_submit_scrub_rbio(rbio);
2869 return;
2870
2871rbio_out:
2872 bio_put(bio);
2873bbio_out:
Zhao Lei6e9606d2015-01-20 15:11:34 +08002874 btrfs_put_bbio(bbio);
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002875 bitmap_or(sparity->ebitmap, sparity->ebitmap, sparity->dbitmap,
2876 sparity->nsectors);
2877 spin_lock(&sctx->stat_lock);
2878 sctx->stat.malloc_errors++;
2879 spin_unlock(&sctx->stat_lock);
2880out:
2881 scrub_free_parity(sparity);
2882}
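/*
 * Note on the two bitmaps used above: dbitmap marks sectors that carry
 * scrubbed data and should take part in the parity check, ebitmap marks
 * sectors that already failed. bitmap_andnot() drops the failed sectors
 * from the check set, and the rbio is skipped entirely if none remain;
 * on an allocation or mapping failure the whole dbitmap is folded into
 * ebitmap, and scrub_free_parity() accounts every ebitmap bit as a read
 * error and an uncorrectable error.
 */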
2883
2884static inline int scrub_calc_parity_bitmap_len(int nsectors)
2885{
2886 return DIV_ROUND_UP(nsectors, BITS_PER_LONG) * (BITS_PER_LONG / 8);
2887}
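/*
 * E.g. with a 64K stripe_len and 4K sectors, nsectors == 16 and, on a
 * 64-bit machine, this returns DIV_ROUND_UP(16, 64) * 8 == 8 bytes per
 * bitmap; scrub_raid56_parity() below allocates twice that, one half
 * for dbitmap and one for ebitmap.
 */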
2888
2889static void scrub_parity_get(struct scrub_parity *sparity)
2890{
Zhao Lei57019342015-01-20 15:11:45 +08002891 atomic_inc(&sparity->refs);
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002892}
2893
2894static void scrub_parity_put(struct scrub_parity *sparity)
2895{
Zhao Lei57019342015-01-20 15:11:45 +08002896 if (!atomic_dec_and_test(&sparity->refs))
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002897 return;
2898
2899 scrub_parity_check_and_repair(sparity);
2900}
2901
2902static noinline_for_stack int scrub_raid56_parity(struct scrub_ctx *sctx,
2903 struct map_lookup *map,
2904 struct btrfs_device *sdev,
2905 struct btrfs_path *path,
2906 u64 logic_start,
2907 u64 logic_end)
2908{
2909 struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
2910 struct btrfs_root *root = fs_info->extent_root;
2911 struct btrfs_root *csum_root = fs_info->csum_root;
2912 struct btrfs_extent_item *extent;
Omar Sandoval4a770892015-06-19 11:52:52 -07002913 struct btrfs_bio *bbio = NULL;
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002914 u64 flags;
2915 int ret;
2916 int slot;
2917 struct extent_buffer *l;
2918 struct btrfs_key key;
2919 u64 generation;
2920 u64 extent_logical;
2921 u64 extent_physical;
2922 u64 extent_len;
Omar Sandoval4a770892015-06-19 11:52:52 -07002923 u64 mapped_length;
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002924 struct btrfs_device *extent_dev;
2925 struct scrub_parity *sparity;
2926 int nsectors;
2927 int bitmap_len;
2928 int extent_mirror_num;
2929 int stop_loop = 0;
2930
2931 nsectors = map->stripe_len / root->sectorsize;
2932 bitmap_len = scrub_calc_parity_bitmap_len(nsectors);
2933 sparity = kzalloc(sizeof(struct scrub_parity) + 2 * bitmap_len,
2934 GFP_NOFS);
2935 if (!sparity) {
2936 spin_lock(&sctx->stat_lock);
2937 sctx->stat.malloc_errors++;
2938 spin_unlock(&sctx->stat_lock);
2939 return -ENOMEM;
2940 }
2941
2942 sparity->stripe_len = map->stripe_len;
2943 sparity->nsectors = nsectors;
2944 sparity->sctx = sctx;
2945 sparity->scrub_dev = sdev;
2946 sparity->logic_start = logic_start;
2947 sparity->logic_end = logic_end;
Zhao Lei57019342015-01-20 15:11:45 +08002948 atomic_set(&sparity->refs, 1);
Miao Xie5a6ac9e2014-11-06 17:20:58 +08002949 INIT_LIST_HEAD(&sparity->spages);
2950 sparity->dbitmap = sparity->bitmap;
2951 sparity->ebitmap = (void *)sparity->bitmap + bitmap_len;
2952
2953 ret = 0;
2954 while (logic_start < logic_end) {
2955 if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
2956 key.type = BTRFS_METADATA_ITEM_KEY;
2957 else
2958 key.type = BTRFS_EXTENT_ITEM_KEY;
2959 key.objectid = logic_start;
2960 key.offset = (u64)-1;
2961
2962 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2963 if (ret < 0)
2964 goto out;
2965
2966 if (ret > 0) {
2967 ret = btrfs_previous_extent_item(root, path, 0);
2968 if (ret < 0)
2969 goto out;
2970 if (ret > 0) {
2971 btrfs_release_path(path);
2972 ret = btrfs_search_slot(NULL, root, &key,
2973 path, 0, 0);
2974 if (ret < 0)
2975 goto out;
2976 }
2977 }
2978
2979 stop_loop = 0;
2980 while (1) {
2981 u64 bytes;
2982
2983 l = path->nodes[0];
2984 slot = path->slots[0];
2985 if (slot >= btrfs_header_nritems(l)) {
2986 ret = btrfs_next_leaf(root, path);
2987 if (ret == 0)
2988 continue;
2989 if (ret < 0)
2990 goto out;
2991
2992 stop_loop = 1;
2993 break;
2994 }
2995 btrfs_item_key_to_cpu(l, &key, slot);
2996
Zhao Leid7cad232015-07-22 13:14:48 +08002997 if (key.type != BTRFS_EXTENT_ITEM_KEY &&
2998 key.type != BTRFS_METADATA_ITEM_KEY)
2999 goto next;
3000
Miao Xie5a6ac9e2014-11-06 17:20:58 +08003001 if (key.type == BTRFS_METADATA_ITEM_KEY)
3002 bytes = root->nodesize;
3003 else
3004 bytes = key.offset;
3005
3006 if (key.objectid + bytes <= logic_start)
3007 goto next;
3008
Zhao Leia0dd59d2015-07-21 15:42:26 +08003009 if (key.objectid >= logic_end) {
Miao Xie5a6ac9e2014-11-06 17:20:58 +08003010 stop_loop = 1;
3011 break;
3012 }
3013
3014 while (key.objectid >= logic_start + map->stripe_len)
3015 logic_start += map->stripe_len;
3016
3017 extent = btrfs_item_ptr(l, slot,
3018 struct btrfs_extent_item);
3019 flags = btrfs_extent_flags(l, extent);
3020 generation = btrfs_extent_generation(l, extent);
3021
Zhao Leia323e812015-07-23 12:29:49 +08003022 if ((flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) &&
3023 (key.objectid < logic_start ||
3024 key.objectid + bytes >
3025 logic_start + map->stripe_len)) {
3026 btrfs_err(fs_info, "scrub: tree block %llu spanning stripes, ignored. logical=%llu",
3027 key.objectid, logic_start);
Miao Xie5a6ac9e2014-11-06 17:20:58 +08003028 goto next;
3029 }
3030again:
3031 extent_logical = key.objectid;
3032 extent_len = bytes;
3033
3034 if (extent_logical < logic_start) {
3035 extent_len -= logic_start - extent_logical;
3036 extent_logical = logic_start;
3037 }
3038
3039 if (extent_logical + extent_len >
3040 logic_start + map->stripe_len)
3041 extent_len = logic_start + map->stripe_len -
3042 extent_logical;
3043
3044 scrub_parity_mark_sectors_data(sparity, extent_logical,
3045 extent_len);
3046
Omar Sandoval4a770892015-06-19 11:52:52 -07003047 mapped_length = extent_len;
3048 ret = btrfs_map_block(fs_info, READ, extent_logical,
3049 &mapped_length, &bbio, 0);
3050 if (!ret) {
3051 if (!bbio || mapped_length < extent_len)
3052 ret = -EIO;
3053 }
3054 if (ret) {
3055 btrfs_put_bbio(bbio);
3056 goto out;
3057 }
3058 extent_physical = bbio->stripes[0].physical;
3059 extent_mirror_num = bbio->mirror_num;
3060 extent_dev = bbio->stripes[0].dev;
3061 btrfs_put_bbio(bbio);
Miao Xie5a6ac9e2014-11-06 17:20:58 +08003062
3063 ret = btrfs_lookup_csums_range(csum_root,
3064 extent_logical,
3065 extent_logical + extent_len - 1,
3066 &sctx->csum_list, 1);
3067 if (ret)
3068 goto out;
3069
3070 ret = scrub_extent_for_parity(sparity, extent_logical,
3071 extent_len,
3072 extent_physical,
3073 extent_dev, flags,
3074 generation,
3075 extent_mirror_num);
Zhao Lei6fa96d72015-07-21 12:22:30 +08003076
3077 scrub_free_csums(sctx);
3078
Miao Xie5a6ac9e2014-11-06 17:20:58 +08003079 if (ret)
3080 goto out;
3081
Miao Xie5a6ac9e2014-11-06 17:20:58 +08003082 if (extent_logical + extent_len <
3083 key.objectid + bytes) {
3084 logic_start += map->stripe_len;
3085
3086 if (logic_start >= logic_end) {
3087 stop_loop = 1;
3088 break;
3089 }
3090
3091 if (logic_start < key.objectid + bytes) {
3092 cond_resched();
3093 goto again;
3094 }
3095 }
3096next:
3097 path->slots[0]++;
3098 }
3099
3100 btrfs_release_path(path);
3101
3102 if (stop_loop)
3103 break;
3104
3105 logic_start += map->stripe_len;
3106 }
3107out:
3108 if (ret < 0)
3109 scrub_parity_mark_sectors_error(sparity, logic_start,
Zhao Leia0dd59d2015-07-21 15:42:26 +08003110 logic_end - logic_start);
Miao Xie5a6ac9e2014-11-06 17:20:58 +08003111 scrub_parity_put(sparity);
3112 scrub_submit(sctx);
3113 mutex_lock(&sctx->wr_ctx.wr_lock);
3114 scrub_wr_submit(sctx);
3115 mutex_unlock(&sctx->wr_ctx.wr_lock);
3116
3117 btrfs_release_path(path);
3118 return ret < 0 ? ret : 0;
3119}
3120
Stefan Behrensd9d181c2012-11-02 09:58:09 +01003121static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01003122 struct map_lookup *map,
3123 struct btrfs_device *scrub_dev,
Stefan Behrensff023aa2012-11-06 11:43:11 +01003124 int num, u64 base, u64 length,
3125 int is_dev_replace)
Arne Jansena2de7332011-03-08 14:14:00 +01003126{
Miao Xie5a6ac9e2014-11-06 17:20:58 +08003127 struct btrfs_path *path, *ppath;
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01003128 struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
Arne Jansena2de7332011-03-08 14:14:00 +01003129 struct btrfs_root *root = fs_info->extent_root;
3130 struct btrfs_root *csum_root = fs_info->csum_root;
3131 struct btrfs_extent_item *extent;
Arne Jansene7786c32011-05-28 20:58:38 +00003132 struct blk_plug plug;
Arne Jansena2de7332011-03-08 14:14:00 +01003133 u64 flags;
3134 int ret;
3135 int slot;
Arne Jansena2de7332011-03-08 14:14:00 +01003136 u64 nstripes;
Arne Jansena2de7332011-03-08 14:14:00 +01003137 struct extent_buffer *l;
3138 struct btrfs_key key;
3139 u64 physical;
3140 u64 logical;
Liu Bo625f1c8d2013-04-27 02:56:57 +00003141 u64 logic_end;
Wang Shilong3b080b22014-04-01 18:01:43 +08003142 u64 physical_end;
Arne Jansena2de7332011-03-08 14:14:00 +01003143 u64 generation;
Jan Schmidte12fa9c2011-06-17 15:55:21 +02003144 int mirror_num;
Arne Jansen7a262852011-06-10 12:39:23 +02003145 struct reada_control *reada1;
3146 struct reada_control *reada2;
3147 struct btrfs_key key_start;
3148 struct btrfs_key key_end;
Arne Jansena2de7332011-03-08 14:14:00 +01003149 u64 increment = map->stripe_len;
3150 u64 offset;
Stefan Behrensff023aa2012-11-06 11:43:11 +01003151 u64 extent_logical;
3152 u64 extent_physical;
3153 u64 extent_len;
Miao Xie5a6ac9e2014-11-06 17:20:58 +08003154 u64 stripe_logical;
3155 u64 stripe_end;
Stefan Behrensff023aa2012-11-06 11:43:11 +01003156 struct btrfs_device *extent_dev;
3157 int extent_mirror_num;
Wang Shilong3b080b22014-04-01 18:01:43 +08003158 int stop_loop = 0;
David Woodhouse53b381b2013-01-29 18:40:14 -05003159
Wang Shilong3b080b22014-04-01 18:01:43 +08003160 physical = map->stripes[num].physical;
Arne Jansena2de7332011-03-08 14:14:00 +01003161 offset = 0;
David Sterbab8b93ad2015-01-16 17:26:13 +01003162 nstripes = div_u64(length, map->stripe_len);
Arne Jansena2de7332011-03-08 14:14:00 +01003163 if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
3164 offset = map->stripe_len * num;
3165 increment = map->stripe_len * map->num_stripes;
Jan Schmidt193ea742011-06-13 19:56:54 +02003166 mirror_num = 1;
Arne Jansena2de7332011-03-08 14:14:00 +01003167 } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
3168 int factor = map->num_stripes / map->sub_stripes;
3169 offset = map->stripe_len * (num / map->sub_stripes);
3170 increment = map->stripe_len * factor;
Jan Schmidt193ea742011-06-13 19:56:54 +02003171 mirror_num = num % map->sub_stripes + 1;
Arne Jansena2de7332011-03-08 14:14:00 +01003172 } else if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
3173 increment = map->stripe_len;
Jan Schmidt193ea742011-06-13 19:56:54 +02003174 mirror_num = num % map->num_stripes + 1;
Arne Jansena2de7332011-03-08 14:14:00 +01003175 } else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
3176 increment = map->stripe_len;
Jan Schmidt193ea742011-06-13 19:56:54 +02003177 mirror_num = num % map->num_stripes + 1;
Zhao Leiffe2d202015-01-20 15:11:44 +08003178 } else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
Miao Xie5a6ac9e2014-11-06 17:20:58 +08003179 get_raid56_logic_offset(physical, num, map, &offset, NULL);
Wang Shilong3b080b22014-04-01 18:01:43 +08003180 increment = map->stripe_len * nr_data_stripes(map);
3181 mirror_num = 1;
Arne Jansena2de7332011-03-08 14:14:00 +01003182 } else {
3183 increment = map->stripe_len;
Jan Schmidt193ea742011-06-13 19:56:54 +02003184 mirror_num = 1;
Arne Jansena2de7332011-03-08 14:14:00 +01003185 }
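/*
 * Example of the per-profile setup above (illustrative numbers): for
 * RAID10 with num_stripes == 4, sub_stripes == 2 and num == 3, factor
 * is 2, so offset = stripe_len * 1, increment = stripe_len * 2 and
 * mirror_num = 3 % 2 + 1 = 2, i.e. this device holds the second copy
 * of every other stripe.
 */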
3186
3187 path = btrfs_alloc_path();
3188 if (!path)
3189 return -ENOMEM;
3190
Miao Xie5a6ac9e2014-11-06 17:20:58 +08003191 ppath = btrfs_alloc_path();
3192 if (!ppath) {
Tsutomu Itoh379d6852015-01-09 17:37:52 +09003193 btrfs_free_path(path);
Miao Xie5a6ac9e2014-11-06 17:20:58 +08003194 return -ENOMEM;
3195 }
3196
Stefan Behrensb5d67f62012-03-27 14:21:27 -04003197 /*
3198	 * work on the commit root. The related disk blocks are static as
3199	 * long as COW is applied. This means it is safe to rewrite
3200	 * them to repair disk errors without any race conditions
3201 */
Arne Jansena2de7332011-03-08 14:14:00 +01003202 path->search_commit_root = 1;
3203 path->skip_locking = 1;
3204
Gui Hecheng063c54d2015-01-09 09:39:40 +08003205 ppath->search_commit_root = 1;
3206 ppath->skip_locking = 1;
Arne Jansena2de7332011-03-08 14:14:00 +01003207 /*
Arne Jansen7a262852011-06-10 12:39:23 +02003208	 * trigger the readahead for the extent tree and the csum tree and
3209	 * wait for completion. During readahead, the scrub is officially
3210	 * paused so as not to hold off transaction commits
Arne Jansena2de7332011-03-08 14:14:00 +01003211 */
3212 logical = base + offset;
Wang Shilong3b080b22014-04-01 18:01:43 +08003213 physical_end = physical + nstripes * map->stripe_len;
Zhao Leiffe2d202015-01-20 15:11:44 +08003214 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
Wang Shilong3b080b22014-04-01 18:01:43 +08003215 get_raid56_logic_offset(physical_end, num,
Miao Xie5a6ac9e2014-11-06 17:20:58 +08003216 map, &logic_end, NULL);
Wang Shilong3b080b22014-04-01 18:01:43 +08003217 logic_end += base;
3218 } else {
3219 logic_end = logical + increment * nstripes;
3220 }
Stefan Behrensd9d181c2012-11-02 09:58:09 +01003221 wait_event(sctx->list_wait,
Stefan Behrensb6bfebc2012-11-02 16:44:58 +01003222 atomic_read(&sctx->bios_in_flight) == 0);
Wang Shilongcb7ab022013-12-04 21:16:53 +08003223 scrub_blocked_if_needed(fs_info);
Arne Jansena2de7332011-03-08 14:14:00 +01003224
Arne Jansen7a262852011-06-10 12:39:23 +02003225 /* FIXME it might be better to start readahead at commit root */
3226 key_start.objectid = logical;
3227 key_start.type = BTRFS_EXTENT_ITEM_KEY;
3228 key_start.offset = (u64)0;
Wang Shilong3b080b22014-04-01 18:01:43 +08003229 key_end.objectid = logic_end;
Josef Bacik3173a182013-03-07 14:22:04 -05003230 key_end.type = BTRFS_METADATA_ITEM_KEY;
3231 key_end.offset = (u64)-1;
Arne Jansen7a262852011-06-10 12:39:23 +02003232 reada1 = btrfs_reada_add(root, &key_start, &key_end);
Arne Jansena2de7332011-03-08 14:14:00 +01003233
Arne Jansen7a262852011-06-10 12:39:23 +02003234 key_start.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
3235 key_start.type = BTRFS_EXTENT_CSUM_KEY;
3236 key_start.offset = logical;
3237 key_end.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
3238 key_end.type = BTRFS_EXTENT_CSUM_KEY;
Wang Shilong3b080b22014-04-01 18:01:43 +08003239 key_end.offset = logic_end;
Arne Jansen7a262852011-06-10 12:39:23 +02003240 reada2 = btrfs_reada_add(csum_root, &key_start, &key_end);
Arne Jansena2de7332011-03-08 14:14:00 +01003241
Arne Jansen7a262852011-06-10 12:39:23 +02003242 if (!IS_ERR(reada1))
3243 btrfs_reada_wait(reada1);
3244 if (!IS_ERR(reada2))
3245 btrfs_reada_wait(reada2);
Arne Jansena2de7332011-03-08 14:14:00 +01003246
Arne Jansena2de7332011-03-08 14:14:00 +01003247
3248 /*
3249 * collect all data csums for the stripe to avoid seeking during
3250	 * the scrub. This might currently (crc32) end up being about 1MB
3251 */
Arne Jansene7786c32011-05-28 20:58:38 +00003252 blk_start_plug(&plug);
Arne Jansena2de7332011-03-08 14:14:00 +01003253
Arne Jansena2de7332011-03-08 14:14:00 +01003254 /*
3255 * now find all extents for each stripe and scrub them
3256 */
Arne Jansena2de7332011-03-08 14:14:00 +01003257 ret = 0;
Wang Shilong3b080b22014-04-01 18:01:43 +08003258 while (physical < physical_end) {
Arne Jansena2de7332011-03-08 14:14:00 +01003259 /*
3260 * canceled?
3261 */
3262 if (atomic_read(&fs_info->scrub_cancel_req) ||
Stefan Behrensd9d181c2012-11-02 09:58:09 +01003263 atomic_read(&sctx->cancel_req)) {
Arne Jansena2de7332011-03-08 14:14:00 +01003264 ret = -ECANCELED;
3265 goto out;
3266 }
3267 /*
3268 * check to see if we have to pause
3269 */
3270 if (atomic_read(&fs_info->scrub_pause_req)) {
3271 /* push queued extents */
Stefan Behrensff023aa2012-11-06 11:43:11 +01003272 atomic_set(&sctx->wr_ctx.flush_all_writes, 1);
Stefan Behrensd9d181c2012-11-02 09:58:09 +01003273 scrub_submit(sctx);
Stefan Behrensff023aa2012-11-06 11:43:11 +01003274 mutex_lock(&sctx->wr_ctx.wr_lock);
3275 scrub_wr_submit(sctx);
3276 mutex_unlock(&sctx->wr_ctx.wr_lock);
Stefan Behrensd9d181c2012-11-02 09:58:09 +01003277 wait_event(sctx->list_wait,
Stefan Behrensb6bfebc2012-11-02 16:44:58 +01003278 atomic_read(&sctx->bios_in_flight) == 0);
Stefan Behrensff023aa2012-11-06 11:43:11 +01003279 atomic_set(&sctx->wr_ctx.flush_all_writes, 0);
Wang Shilong3cb09292013-12-04 21:15:19 +08003280 scrub_blocked_if_needed(fs_info);
Arne Jansena2de7332011-03-08 14:14:00 +01003281 }
3282
Zhao Leif2f66a22015-07-21 12:22:29 +08003283 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
3284 ret = get_raid56_logic_offset(physical, num, map,
3285 &logical,
3286 &stripe_logical);
3287 logical += base;
3288 if (ret) {
Zhao Lei79553232015-08-18 17:54:30 +08003289				/* it is a parity stripe */
Zhao Leif2f66a22015-07-21 12:22:29 +08003290 stripe_logical += base;
Zhao Leia0dd59d2015-07-21 15:42:26 +08003291 stripe_end = stripe_logical + increment;
Zhao Leif2f66a22015-07-21 12:22:29 +08003292 ret = scrub_raid56_parity(sctx, map, scrub_dev,
3293 ppath, stripe_logical,
3294 stripe_end);
3295 if (ret)
3296 goto out;
3297 goto skip;
3298 }
3299 }
3300
Wang Shilong7c76edb2014-01-12 21:38:32 +08003301 if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
3302 key.type = BTRFS_METADATA_ITEM_KEY;
3303 else
3304 key.type = BTRFS_EXTENT_ITEM_KEY;
Arne Jansena2de7332011-03-08 14:14:00 +01003305 key.objectid = logical;
Liu Bo625f1c8d2013-04-27 02:56:57 +00003306 key.offset = (u64)-1;
Arne Jansena2de7332011-03-08 14:14:00 +01003307
3308 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3309 if (ret < 0)
3310 goto out;
Josef Bacik3173a182013-03-07 14:22:04 -05003311
Arne Jansen8c510322011-06-03 10:09:26 +02003312 if (ret > 0) {
Wang Shilongade2e0b2014-01-12 21:38:33 +08003313 ret = btrfs_previous_extent_item(root, path, 0);
Arne Jansena2de7332011-03-08 14:14:00 +01003314 if (ret < 0)
3315 goto out;
Arne Jansen8c510322011-06-03 10:09:26 +02003316 if (ret > 0) {
3317 /* there's no smaller item, so stick with the
3318 * larger one */
3319 btrfs_release_path(path);
3320 ret = btrfs_search_slot(NULL, root, &key,
3321 path, 0, 0);
3322 if (ret < 0)
3323 goto out;
3324 }
Arne Jansena2de7332011-03-08 14:14:00 +01003325 }
3326
Liu Bo625f1c8d2013-04-27 02:56:57 +00003327 stop_loop = 0;
Arne Jansena2de7332011-03-08 14:14:00 +01003328 while (1) {
Josef Bacik3173a182013-03-07 14:22:04 -05003329 u64 bytes;
3330
Arne Jansena2de7332011-03-08 14:14:00 +01003331 l = path->nodes[0];
3332 slot = path->slots[0];
3333 if (slot >= btrfs_header_nritems(l)) {
3334 ret = btrfs_next_leaf(root, path);
3335 if (ret == 0)
3336 continue;
3337 if (ret < 0)
3338 goto out;
3339
Liu Bo625f1c8d2013-04-27 02:56:57 +00003340 stop_loop = 1;
Arne Jansena2de7332011-03-08 14:14:00 +01003341 break;
3342 }
3343 btrfs_item_key_to_cpu(l, &key, slot);
3344
Zhao Leid7cad232015-07-22 13:14:48 +08003345 if (key.type != BTRFS_EXTENT_ITEM_KEY &&
3346 key.type != BTRFS_METADATA_ITEM_KEY)
3347 goto next;
3348
Josef Bacik3173a182013-03-07 14:22:04 -05003349 if (key.type == BTRFS_METADATA_ITEM_KEY)
David Sterba707e8a02014-06-04 19:22:26 +02003350 bytes = root->nodesize;
Josef Bacik3173a182013-03-07 14:22:04 -05003351 else
3352 bytes = key.offset;
3353
3354 if (key.objectid + bytes <= logical)
Arne Jansena2de7332011-03-08 14:14:00 +01003355 goto next;
3356
Liu Bo625f1c8d2013-04-27 02:56:57 +00003357 if (key.objectid >= logical + map->stripe_len) {
3358 /* out of this device extent */
3359 if (key.objectid >= logic_end)
3360 stop_loop = 1;
3361 break;
3362 }
Arne Jansena2de7332011-03-08 14:14:00 +01003363
3364 extent = btrfs_item_ptr(l, slot,
3365 struct btrfs_extent_item);
3366 flags = btrfs_extent_flags(l, extent);
3367 generation = btrfs_extent_generation(l, extent);
3368
Zhao Leia323e812015-07-23 12:29:49 +08003369 if ((flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) &&
3370 (key.objectid < logical ||
3371 key.objectid + bytes >
3372 logical + map->stripe_len)) {
Frank Holtonefe120a2013-12-20 11:37:06 -05003373 btrfs_err(fs_info,
3374 "scrub: tree block %llu spanning "
3375 "stripes, ignored. logical=%llu",
Geert Uytterhoevenc1c9ff72013-08-20 13:20:07 +02003376 key.objectid, logical);
Arne Jansena2de7332011-03-08 14:14:00 +01003377 goto next;
3378 }
3379
Liu Bo625f1c8d2013-04-27 02:56:57 +00003380again:
3381 extent_logical = key.objectid;
3382 extent_len = bytes;
3383
Arne Jansena2de7332011-03-08 14:14:00 +01003384 /*
3385 * trim extent to this stripe
3386 */
Liu Bo625f1c8d2013-04-27 02:56:57 +00003387 if (extent_logical < logical) {
3388 extent_len -= logical - extent_logical;
3389 extent_logical = logical;
Arne Jansena2de7332011-03-08 14:14:00 +01003390 }
Liu Bo625f1c8d2013-04-27 02:56:57 +00003391 if (extent_logical + extent_len >
Arne Jansena2de7332011-03-08 14:14:00 +01003392 logical + map->stripe_len) {
Liu Bo625f1c8d2013-04-27 02:56:57 +00003393 extent_len = logical + map->stripe_len -
3394 extent_logical;
Arne Jansena2de7332011-03-08 14:14:00 +01003395 }
3396
Liu Bo625f1c8d2013-04-27 02:56:57 +00003397 extent_physical = extent_logical - logical + physical;
Stefan Behrensff023aa2012-11-06 11:43:11 +01003398 extent_dev = scrub_dev;
3399 extent_mirror_num = mirror_num;
3400 if (is_dev_replace)
3401 scrub_remap_extent(fs_info, extent_logical,
3402 extent_len, &extent_physical,
3403 &extent_dev,
3404 &extent_mirror_num);
Liu Bo625f1c8d2013-04-27 02:56:57 +00003405
Zhao Leife8cf652015-07-22 13:14:47 +08003406 ret = btrfs_lookup_csums_range(csum_root,
3407 extent_logical,
3408 extent_logical +
3409 extent_len - 1,
3410 &sctx->csum_list, 1);
Arne Jansena2de7332011-03-08 14:14:00 +01003411 if (ret)
3412 goto out;
3413
Liu Bo625f1c8d2013-04-27 02:56:57 +00003414 ret = scrub_extent(sctx, extent_logical, extent_len,
3415 extent_physical, extent_dev, flags,
3416 generation, extent_mirror_num,
Stefan Behrens115930c2013-07-04 16:14:23 +02003417 extent_logical - logical + physical);
Zhao Lei6fa96d72015-07-21 12:22:30 +08003418
3419 scrub_free_csums(sctx);
3420
Liu Bo625f1c8d2013-04-27 02:56:57 +00003421 if (ret)
3422 goto out;
3423
3424 if (extent_logical + extent_len <
3425 key.objectid + bytes) {
Zhao Leiffe2d202015-01-20 15:11:44 +08003426 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
Wang Shilong3b080b22014-04-01 18:01:43 +08003427 /*
3428 * loop until we find next data stripe
3429 * or we have finished all stripes.
3430 */
Miao Xie5a6ac9e2014-11-06 17:20:58 +08003431loop:
3432 physical += map->stripe_len;
3433 ret = get_raid56_logic_offset(physical,
3434 num, map, &logical,
3435 &stripe_logical);
3436 logical += base;
3437
3438 if (ret && physical < physical_end) {
3439 stripe_logical += base;
3440 stripe_end = stripe_logical +
Zhao Leia0dd59d2015-07-21 15:42:26 +08003441 increment;
Miao Xie5a6ac9e2014-11-06 17:20:58 +08003442 ret = scrub_raid56_parity(sctx,
3443 map, scrub_dev, ppath,
3444 stripe_logical,
3445 stripe_end);
3446 if (ret)
3447 goto out;
3448 goto loop;
3449 }
Wang Shilong3b080b22014-04-01 18:01:43 +08003450 } else {
3451 physical += map->stripe_len;
3452 logical += increment;
3453 }
Liu Bo625f1c8d2013-04-27 02:56:57 +00003454 if (logical < key.objectid + bytes) {
3455 cond_resched();
3456 goto again;
3457 }
3458
Wang Shilong3b080b22014-04-01 18:01:43 +08003459 if (physical >= physical_end) {
Liu Bo625f1c8d2013-04-27 02:56:57 +00003460 stop_loop = 1;
3461 break;
3462 }
3463 }
Arne Jansena2de7332011-03-08 14:14:00 +01003464next:
3465 path->slots[0]++;
3466 }
Chris Mason71267332011-05-23 06:30:52 -04003467 btrfs_release_path(path);
Wang Shilong3b080b22014-04-01 18:01:43 +08003468skip:
Arne Jansena2de7332011-03-08 14:14:00 +01003469 logical += increment;
3470 physical += map->stripe_len;
Stefan Behrensd9d181c2012-11-02 09:58:09 +01003471 spin_lock(&sctx->stat_lock);
Liu Bo625f1c8d2013-04-27 02:56:57 +00003472 if (stop_loop)
3473 sctx->stat.last_physical = map->stripes[num].physical +
3474 length;
3475 else
3476 sctx->stat.last_physical = physical;
Stefan Behrensd9d181c2012-11-02 09:58:09 +01003477 spin_unlock(&sctx->stat_lock);
Liu Bo625f1c8d2013-04-27 02:56:57 +00003478 if (stop_loop)
3479 break;
Arne Jansena2de7332011-03-08 14:14:00 +01003480 }
Stefan Behrensff023aa2012-11-06 11:43:11 +01003481out:
Arne Jansena2de7332011-03-08 14:14:00 +01003482 /* push queued extents */
Stefan Behrensd9d181c2012-11-02 09:58:09 +01003483 scrub_submit(sctx);
Stefan Behrensff023aa2012-11-06 11:43:11 +01003484 mutex_lock(&sctx->wr_ctx.wr_lock);
3485 scrub_wr_submit(sctx);
3486 mutex_unlock(&sctx->wr_ctx.wr_lock);
Arne Jansena2de7332011-03-08 14:14:00 +01003487
Arne Jansene7786c32011-05-28 20:58:38 +00003488 blk_finish_plug(&plug);
Arne Jansena2de7332011-03-08 14:14:00 +01003489 btrfs_free_path(path);
Miao Xie5a6ac9e2014-11-06 17:20:58 +08003490 btrfs_free_path(ppath);
Arne Jansena2de7332011-03-08 14:14:00 +01003491 return ret < 0 ? ret : 0;
3492}
3493
Stefan Behrensd9d181c2012-11-02 09:58:09 +01003494static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx,
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01003495 struct btrfs_device *scrub_dev,
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01003496 u64 chunk_offset, u64 length,
Stefan Behrensff023aa2012-11-06 11:43:11 +01003497 u64 dev_offset, int is_dev_replace)
Arne Jansena2de7332011-03-08 14:14:00 +01003498{
3499 struct btrfs_mapping_tree *map_tree =
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01003500 &sctx->dev_root->fs_info->mapping_tree;
Arne Jansena2de7332011-03-08 14:14:00 +01003501 struct map_lookup *map;
3502 struct extent_map *em;
3503 int i;
Stefan Behrensff023aa2012-11-06 11:43:11 +01003504 int ret = 0;
Arne Jansena2de7332011-03-08 14:14:00 +01003505
3506 read_lock(&map_tree->map_tree.lock);
3507 em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
3508 read_unlock(&map_tree->map_tree.lock);
3509
3510 if (!em)
3511 return -EINVAL;
3512
3513 map = (struct map_lookup *)em->bdev;
3514 if (em->start != chunk_offset)
3515 goto out;
3516
3517 if (em->len < length)
3518 goto out;
3519
3520 for (i = 0; i < map->num_stripes; ++i) {
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01003521 if (map->stripes[i].dev->bdev == scrub_dev->bdev &&
Arne Jansen859acaf2012-02-09 15:09:02 +01003522 map->stripes[i].physical == dev_offset) {
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01003523 ret = scrub_stripe(sctx, map, scrub_dev, i,
Stefan Behrensff023aa2012-11-06 11:43:11 +01003524 chunk_offset, length,
3525 is_dev_replace);
Arne Jansena2de7332011-03-08 14:14:00 +01003526 if (ret)
3527 goto out;
3528 }
3529 }
3530out:
3531 free_extent_map(em);
3532
3533 return ret;
3534}
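/*
 * Note on scrub_chunk() above: a chunk maps to one map_lookup with
 * num_stripes (device, physical) pairs, and only the pairs that sit on
 * the scrubbed device at the given dev_offset are walked. A DUP chunk,
 * for instance, has two dev extents on the same device, each scrubbed
 * through its own call here.
 */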
3535
3536static noinline_for_stack
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01003537int scrub_enumerate_chunks(struct scrub_ctx *sctx,
Stefan Behrensff023aa2012-11-06 11:43:11 +01003538 struct btrfs_device *scrub_dev, u64 start, u64 end,
3539 int is_dev_replace)
Arne Jansena2de7332011-03-08 14:14:00 +01003540{
3541 struct btrfs_dev_extent *dev_extent = NULL;
3542 struct btrfs_path *path;
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01003543 struct btrfs_root *root = sctx->dev_root;
Arne Jansena2de7332011-03-08 14:14:00 +01003544 struct btrfs_fs_info *fs_info = root->fs_info;
3545 u64 length;
Arne Jansena2de7332011-03-08 14:14:00 +01003546 u64 chunk_offset;
Zhaolei55e3a602015-08-05 16:43:30 +08003547 int ret = 0;
Arne Jansena2de7332011-03-08 14:14:00 +01003548 int slot;
3549 struct extent_buffer *l;
3550 struct btrfs_key key;
3551 struct btrfs_key found_key;
3552 struct btrfs_block_group_cache *cache;
Stefan Behrensff023aa2012-11-06 11:43:11 +01003553 struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
Arne Jansena2de7332011-03-08 14:14:00 +01003554
3555 path = btrfs_alloc_path();
3556 if (!path)
3557 return -ENOMEM;
3558
3559 path->reada = 2;
3560 path->search_commit_root = 1;
3561 path->skip_locking = 1;
3562
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01003563 key.objectid = scrub_dev->devid;
Arne Jansena2de7332011-03-08 14:14:00 +01003564 key.offset = 0ull;
3565 key.type = BTRFS_DEV_EXTENT_KEY;
3566
Arne Jansena2de7332011-03-08 14:14:00 +01003567 while (1) {
3568 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3569 if (ret < 0)
Arne Jansen8c510322011-06-03 10:09:26 +02003570 break;
3571 if (ret > 0) {
3572 if (path->slots[0] >=
3573 btrfs_header_nritems(path->nodes[0])) {
3574 ret = btrfs_next_leaf(root, path);
Zhaolei55e3a602015-08-05 16:43:30 +08003575 if (ret < 0)
Arne Jansen8c510322011-06-03 10:09:26 +02003576 break;
Zhaolei55e3a602015-08-05 16:43:30 +08003577 if (ret > 0) {
3578 ret = 0;
3579 break;
3580 }
3581 } else {
3582 ret = 0;
Arne Jansen8c510322011-06-03 10:09:26 +02003583 }
3584 }
Arne Jansena2de7332011-03-08 14:14:00 +01003585
3586 l = path->nodes[0];
3587 slot = path->slots[0];
3588
3589 btrfs_item_key_to_cpu(l, &found_key, slot);
3590
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01003591 if (found_key.objectid != scrub_dev->devid)
Arne Jansena2de7332011-03-08 14:14:00 +01003592 break;
3593
David Sterba962a2982014-06-04 18:41:45 +02003594 if (found_key.type != BTRFS_DEV_EXTENT_KEY)
Arne Jansena2de7332011-03-08 14:14:00 +01003595 break;
3596
3597 if (found_key.offset >= end)
3598 break;
3599
3600 if (found_key.offset < key.offset)
3601 break;
3602
3603 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
3604 length = btrfs_dev_extent_length(l, dev_extent);
3605
Qu Wenruoced96ed2014-06-19 10:42:51 +08003606 if (found_key.offset + length <= start)
3607 goto skip;
Arne Jansena2de7332011-03-08 14:14:00 +01003608
Arne Jansena2de7332011-03-08 14:14:00 +01003609 chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
3610
3611 /*
3612 * get a reference on the corresponding block group to prevent
3613 * the chunk from going away while we scrub it
3614 */
3615 cache = btrfs_lookup_block_group(fs_info, chunk_offset);
Qu Wenruoced96ed2014-06-19 10:42:51 +08003616
3617		/* some chunks are removed but not yet committed to disk,
3618		 * continue scrubbing */
3619 if (!cache)
3620 goto skip;
3621
Zhaolei55e3a602015-08-05 16:43:30 +08003622 /*
3623 * we need call btrfs_inc_block_group_ro() with scrubs_paused,
3624 * to avoid deadlock caused by:
3625 * btrfs_inc_block_group_ro()
3626 * -> btrfs_wait_for_commit()
3627 * -> btrfs_commit_transaction()
3628 * -> btrfs_scrub_pause()
3629 */
3630 scrub_pause_on(fs_info);
3631 ret = btrfs_inc_block_group_ro(root, cache);
3632 scrub_pause_off(fs_info);
3633 if (ret) {
3634 btrfs_put_block_group(cache);
3635 break;
3636 }
3637
Stefan Behrensff023aa2012-11-06 11:43:11 +01003638 dev_replace->cursor_right = found_key.offset + length;
3639 dev_replace->cursor_left = found_key.offset;
3640 dev_replace->item_needs_writeback = 1;
Zhao Lei8c204c92015-08-19 15:02:40 +08003641 ret = scrub_chunk(sctx, scrub_dev, chunk_offset, length,
3642 found_key.offset, is_dev_replace);
Stefan Behrensff023aa2012-11-06 11:43:11 +01003643
3644 /*
3645 * flush, submit all pending read and write bios, afterwards
3646 * wait for them.
3647 * Note that in the dev replace case, a read request causes
3648 * write requests that are submitted in the read completion
3649 * worker. Therefore in the current situation, it is required
3650 * that all write requests are flushed, so that all read and
3651 * write requests are really completed when bios_in_flight
3652 * changes to 0.
3653 */
3654 atomic_set(&sctx->wr_ctx.flush_all_writes, 1);
3655 scrub_submit(sctx);
3656 mutex_lock(&sctx->wr_ctx.wr_lock);
3657 scrub_wr_submit(sctx);
3658 mutex_unlock(&sctx->wr_ctx.wr_lock);
3659
3660 wait_event(sctx->list_wait,
3661 atomic_read(&sctx->bios_in_flight) == 0);
Zhaoleib708ce92015-08-05 16:43:29 +08003662
3663 scrub_pause_on(fs_info);
Wang Shilong12cf9372014-02-19 19:24:17 +08003664
3665 /*
3666 * must be called before we decrease @scrub_paused.
3667 * make sure we don't block transaction commit while
3668		 * we are waiting for pending workers to finish.
3669 */
Stefan Behrensff023aa2012-11-06 11:43:11 +01003670 wait_event(sctx->list_wait,
3671 atomic_read(&sctx->workers_pending) == 0);
Wang Shilong12cf9372014-02-19 19:24:17 +08003672 atomic_set(&sctx->wr_ctx.flush_all_writes, 0);
3673
Zhaoleib708ce92015-08-05 16:43:29 +08003674 scrub_pause_off(fs_info);
Stefan Behrensff023aa2012-11-06 11:43:11 +01003675
Zhaolei55e3a602015-08-05 16:43:30 +08003676 btrfs_dec_block_group_ro(root, cache);
3677
Arne Jansena2de7332011-03-08 14:14:00 +01003678 btrfs_put_block_group(cache);
3679 if (ret)
3680 break;
Stefan Behrensaf1be4f2012-11-27 17:39:51 +00003681 if (is_dev_replace &&
3682 atomic64_read(&dev_replace->num_write_errors) > 0) {
Stefan Behrensff023aa2012-11-06 11:43:11 +01003683 ret = -EIO;
3684 break;
3685 }
3686 if (sctx->stat.malloc_errors > 0) {
3687 ret = -ENOMEM;
3688 break;
3689 }
Arne Jansena2de7332011-03-08 14:14:00 +01003690
Ilya Dryomov539f3582013-10-07 13:42:57 +03003691 dev_replace->cursor_left = dev_replace->cursor_right;
3692 dev_replace->item_needs_writeback = 1;
Qu Wenruoced96ed2014-06-19 10:42:51 +08003693skip:
Arne Jansena2de7332011-03-08 14:14:00 +01003694 key.offset = found_key.offset + length;
Chris Mason71267332011-05-23 06:30:52 -04003695 btrfs_release_path(path);
Arne Jansena2de7332011-03-08 14:14:00 +01003696 }
3697
Arne Jansena2de7332011-03-08 14:14:00 +01003698 btrfs_free_path(path);
Arne Jansen8c510322011-06-03 10:09:26 +02003699
Zhaolei55e3a602015-08-05 16:43:30 +08003700 return ret;
Arne Jansena2de7332011-03-08 14:14:00 +01003701}
3702
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01003703static noinline_for_stack int scrub_supers(struct scrub_ctx *sctx,
3704 struct btrfs_device *scrub_dev)
Arne Jansena2de7332011-03-08 14:14:00 +01003705{
3706 int i;
3707 u64 bytenr;
3708 u64 gen;
3709 int ret;
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01003710 struct btrfs_root *root = sctx->dev_root;
Arne Jansena2de7332011-03-08 14:14:00 +01003711
Miao Xie87533c42013-01-29 10:14:48 +00003712 if (test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state))
Jeff Mahoney79787ea2012-03-12 16:03:00 +01003713 return -EIO;
3714
Miao Xie5f546062014-07-24 11:37:09 +08003715	/* Seed devices of a new filesystem have their own generation. */
3716 if (scrub_dev->fs_devices != root->fs_info->fs_devices)
3717 gen = scrub_dev->generation;
3718 else
3719 gen = root->fs_info->last_trans_committed;
Arne Jansena2de7332011-03-08 14:14:00 +01003720
3721 for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
3722 bytenr = btrfs_sb_offset(i);
Miao Xie935e5cc2014-09-03 21:35:33 +08003723 if (bytenr + BTRFS_SUPER_INFO_SIZE >
3724 scrub_dev->commit_total_bytes)
Arne Jansena2de7332011-03-08 14:14:00 +01003725 break;
3726
Stefan Behrensd9d181c2012-11-02 09:58:09 +01003727 ret = scrub_pages(sctx, bytenr, BTRFS_SUPER_INFO_SIZE, bytenr,
Stefan Behrensa36cf8b2012-11-02 13:26:57 +01003728 scrub_dev, BTRFS_EXTENT_FLAG_SUPER, gen, i,
Stefan Behrensff023aa2012-11-06 11:43:11 +01003729 NULL, 1, bytenr);
Arne Jansena2de7332011-03-08 14:14:00 +01003730 if (ret)
3731 return ret;
3732 }
Stefan Behrensb6bfebc2012-11-02 16:44:58 +01003733 wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);
Arne Jansena2de7332011-03-08 14:14:00 +01003734
3735 return 0;
3736}
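/*
 * For reference, btrfs_sb_offset() places the BTRFS_SUPER_MIRROR_MAX
 * (i.e. 3) super block copies at 64K, 64M and 256G, which is why the
 * loop above stops as soon as a copy would fall beyond the device's
 * commit_total_bytes.
 */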
3737
3738/*
3739 * get a reference count on fs_info->scrub_workers. start workers if necessary
3740 */
Stefan Behrensff023aa2012-11-06 11:43:11 +01003741static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info,
3742 int is_dev_replace)
Arne Jansena2de7332011-03-08 14:14:00 +01003743{
David Sterba6f011052015-02-16 18:34:01 +01003744 unsigned int flags = WQ_FREEZABLE | WQ_UNBOUND;
Qu Wenruo0339ef22014-02-28 10:46:17 +08003745 int max_active = fs_info->thread_pool_size;
Arne Jansena2de7332011-03-08 14:14:00 +01003746
Arne Jansen632dd772011-06-10 12:07:07 +02003747 if (fs_info->scrub_workers_refcnt == 0) {
Stefan Behrensff023aa2012-11-06 11:43:11 +01003748 if (is_dev_replace)
Qu Wenruo0339ef22014-02-28 10:46:17 +08003749 fs_info->scrub_workers =
3750 btrfs_alloc_workqueue("btrfs-scrub", flags,
3751 1, 4);
Stefan Behrensff023aa2012-11-06 11:43:11 +01003752 else
Qu Wenruo0339ef22014-02-28 10:46:17 +08003753 fs_info->scrub_workers =
3754 btrfs_alloc_workqueue("btrfs-scrub", flags,
3755 max_active, 4);
Zhao Leie82afc52015-06-12 20:36:58 +08003756 if (!fs_info->scrub_workers)
3757 goto fail_scrub_workers;
3758
Qu Wenruo0339ef22014-02-28 10:46:17 +08003759 fs_info->scrub_wr_completion_workers =
3760 btrfs_alloc_workqueue("btrfs-scrubwrc", flags,
3761 max_active, 2);
Zhao Leie82afc52015-06-12 20:36:58 +08003762 if (!fs_info->scrub_wr_completion_workers)
3763 goto fail_scrub_wr_completion_workers;
3764
Qu Wenruo0339ef22014-02-28 10:46:17 +08003765 fs_info->scrub_nocow_workers =
3766 btrfs_alloc_workqueue("btrfs-scrubnc", flags, 1, 0);
Zhao Leie82afc52015-06-12 20:36:58 +08003767 if (!fs_info->scrub_nocow_workers)
3768 goto fail_scrub_nocow_workers;
Zhao Lei20b2e302015-06-04 20:09:15 +08003769 fs_info->scrub_parity_workers =
3770 btrfs_alloc_workqueue("btrfs-scrubparity", flags,
3771 max_active, 2);
Zhao Leie82afc52015-06-12 20:36:58 +08003772 if (!fs_info->scrub_parity_workers)
3773 goto fail_scrub_parity_workers;
Arne Jansen632dd772011-06-10 12:07:07 +02003774 }
Arne Jansena2de7332011-03-08 14:14:00 +01003775 ++fs_info->scrub_workers_refcnt;
Zhao Leie82afc52015-06-12 20:36:58 +08003776 return 0;
3777
3778fail_scrub_parity_workers:
3779 btrfs_destroy_workqueue(fs_info->scrub_nocow_workers);
3780fail_scrub_nocow_workers:
3781 btrfs_destroy_workqueue(fs_info->scrub_wr_completion_workers);
3782fail_scrub_wr_completion_workers:
3783 btrfs_destroy_workqueue(fs_info->scrub_workers);
3784fail_scrub_workers:
3785 return -ENOMEM;
Arne Jansena2de7332011-03-08 14:14:00 +01003786}
3787
Stefan Behrensaa1b8cd2012-11-05 17:03:39 +01003788static noinline_for_stack void scrub_workers_put(struct btrfs_fs_info *fs_info)
Arne Jansena2de7332011-03-08 14:14:00 +01003789{
Stefan Behrensff023aa2012-11-06 11:43:11 +01003790 if (--fs_info->scrub_workers_refcnt == 0) {
Qu Wenruo0339ef22014-02-28 10:46:17 +08003791 btrfs_destroy_workqueue(fs_info->scrub_workers);
3792 btrfs_destroy_workqueue(fs_info->scrub_wr_completion_workers);
3793 btrfs_destroy_workqueue(fs_info->scrub_nocow_workers);
Zhao Lei20b2e302015-06-04 20:09:15 +08003794 btrfs_destroy_workqueue(fs_info->scrub_parity_workers);
Stefan Behrensff023aa2012-11-06 11:43:11 +01003795 }
Arne Jansena2de7332011-03-08 14:14:00 +01003796 WARN_ON(fs_info->scrub_workers_refcnt < 0);
Arne Jansena2de7332011-03-08 14:14:00 +01003797}
3798
Stefan Behrensaa1b8cd2012-11-05 17:03:39 +01003799int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
3800 u64 end, struct btrfs_scrub_progress *progress,
Stefan Behrens63a212a2012-11-05 18:29:28 +01003801 int readonly, int is_dev_replace)
Arne Jansena2de7332011-03-08 14:14:00 +01003802{
Stefan Behrensd9d181c2012-11-02 09:58:09 +01003803 struct scrub_ctx *sctx;
Arne Jansena2de7332011-03-08 14:14:00 +01003804 int ret;
3805 struct btrfs_device *dev;
Miao Xie5d68da32014-07-24 11:37:07 +08003806 struct rcu_string *name;
Arne Jansena2de7332011-03-08 14:14:00 +01003807
Stefan Behrensaa1b8cd2012-11-05 17:03:39 +01003808 if (btrfs_fs_closing(fs_info))
Arne Jansena2de7332011-03-08 14:14:00 +01003809 return -EINVAL;
3810
Stefan Behrensaa1b8cd2012-11-05 17:03:39 +01003811 if (fs_info->chunk_root->nodesize > BTRFS_STRIPE_LEN) {
Stefan Behrensb5d67f62012-03-27 14:21:27 -04003812 /*
3813		 * in this case scrub is unable to calculate the checksum,
3814		 * given the way scrub is implemented. Do not handle this
3815 * situation at all because it won't ever happen.
3816 */
Frank Holtonefe120a2013-12-20 11:37:06 -05003817 btrfs_err(fs_info,
3818 "scrub: size assumption nodesize <= BTRFS_STRIPE_LEN (%d <= %d) fails",
Stefan Behrensaa1b8cd2012-11-05 17:03:39 +01003819 fs_info->chunk_root->nodesize, BTRFS_STRIPE_LEN);
Stefan Behrensb5d67f62012-03-27 14:21:27 -04003820 return -EINVAL;
3821 }
3822
Stefan Behrensaa1b8cd2012-11-05 17:03:39 +01003823 if (fs_info->chunk_root->sectorsize != PAGE_SIZE) {
Stefan Behrensb5d67f62012-03-27 14:21:27 -04003824 /* not supported for data w/o checksums */
Frank Holtonefe120a2013-12-20 11:37:06 -05003825 btrfs_err(fs_info,
3826 "scrub: size assumption sectorsize != PAGE_SIZE "
3827 "(%d != %lu) fails",
Geert Uytterhoeven27f9f022013-08-20 13:20:09 +02003828 fs_info->chunk_root->sectorsize, PAGE_SIZE);
Arne Jansena2de7332011-03-08 14:14:00 +01003829 return -EINVAL;
3830 }
3831
Stefan Behrens7a9e9982012-11-02 14:58:04 +01003832 if (fs_info->chunk_root->nodesize >
3833 PAGE_SIZE * SCRUB_MAX_PAGES_PER_BLOCK ||
3834 fs_info->chunk_root->sectorsize >
3835 PAGE_SIZE * SCRUB_MAX_PAGES_PER_BLOCK) {
3836 /*
3837 * would exhaust the array bounds of pagev member in
3838 * struct scrub_block
3839 */
Frank Holtonefe120a2013-12-20 11:37:06 -05003840 btrfs_err(fs_info, "scrub: size assumption nodesize and sectorsize "
3841 "<= SCRUB_MAX_PAGES_PER_BLOCK (%d <= %d && %d <= %d) fails",
Stefan Behrens7a9e9982012-11-02 14:58:04 +01003842 fs_info->chunk_root->nodesize,
3843 SCRUB_MAX_PAGES_PER_BLOCK,
3844 fs_info->chunk_root->sectorsize,
3845 SCRUB_MAX_PAGES_PER_BLOCK);
3846 return -EINVAL;
3847 }
3848
Arne Jansena2de7332011-03-08 14:14:00 +01003849
Stefan Behrensaa1b8cd2012-11-05 17:03:39 +01003850 mutex_lock(&fs_info->fs_devices->device_list_mutex);
3851 dev = btrfs_find_device(fs_info, devid, NULL, NULL);
Stefan Behrens63a212a2012-11-05 18:29:28 +01003852 if (!dev || (dev->missing && !is_dev_replace)) {
Stefan Behrensaa1b8cd2012-11-05 17:03:39 +01003853 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
Arne Jansena2de7332011-03-08 14:14:00 +01003854 return -ENODEV;
3855 }
Arne Jansena2de7332011-03-08 14:14:00 +01003856
Miao Xie5d68da32014-07-24 11:37:07 +08003857 if (!is_dev_replace && !readonly && !dev->writeable) {
3858 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3859 rcu_read_lock();
3860 name = rcu_dereference(dev->name);
3861 btrfs_err(fs_info, "scrub: device %s is not writable",
3862 name->str);
3863 rcu_read_unlock();
3864 return -EROFS;
3865 }
3866
Wang Shilong3b7a0162013-10-12 02:11:12 +08003867 mutex_lock(&fs_info->scrub_lock);
Stefan Behrens63a212a2012-11-05 18:29:28 +01003868 if (!dev->in_fs_metadata || dev->is_tgtdev_for_dev_replace) {
Arne Jansena2de7332011-03-08 14:14:00 +01003869 mutex_unlock(&fs_info->scrub_lock);
Stefan Behrensaa1b8cd2012-11-05 17:03:39 +01003870 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
Stefan Behrensaa1b8cd2012-11-05 17:03:39 +01003871 return -EIO;
Arne Jansena2de7332011-03-08 14:14:00 +01003872 }
3873
Stefan Behrens8dabb742012-11-06 13:15:27 +01003874 btrfs_dev_replace_lock(&fs_info->dev_replace);
3875 if (dev->scrub_device ||
3876 (!is_dev_replace &&
3877 btrfs_dev_replace_is_ongoing(&fs_info->dev_replace))) {
3878 btrfs_dev_replace_unlock(&fs_info->dev_replace);
Arne Jansena2de7332011-03-08 14:14:00 +01003879 mutex_unlock(&fs_info->scrub_lock);
Stefan Behrensaa1b8cd2012-11-05 17:03:39 +01003880 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
Arne Jansena2de7332011-03-08 14:14:00 +01003881 return -EINPROGRESS;
3882 }
Stefan Behrens8dabb742012-11-06 13:15:27 +01003883 btrfs_dev_replace_unlock(&fs_info->dev_replace);
Wang Shilong3b7a0162013-10-12 02:11:12 +08003884
3885 ret = scrub_workers_get(fs_info, is_dev_replace);
3886 if (ret) {
3887 mutex_unlock(&fs_info->scrub_lock);
3888 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3889 return ret;
3890 }
3891
Stefan Behrens63a212a2012-11-05 18:29:28 +01003892 sctx = scrub_setup_ctx(dev, is_dev_replace);
Stefan Behrensd9d181c2012-11-02 09:58:09 +01003893 if (IS_ERR(sctx)) {
Arne Jansena2de7332011-03-08 14:14:00 +01003894 mutex_unlock(&fs_info->scrub_lock);
Stefan Behrensaa1b8cd2012-11-05 17:03:39 +01003895 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3896 scrub_workers_put(fs_info);
Stefan Behrensd9d181c2012-11-02 09:58:09 +01003897 return PTR_ERR(sctx);
Arne Jansena2de7332011-03-08 14:14:00 +01003898 }
Stefan Behrensd9d181c2012-11-02 09:58:09 +01003899 sctx->readonly = readonly;
3900 dev->scrub_device = sctx;
Wang Shilong3cb09292013-12-04 21:15:19 +08003901 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
Arne Jansena2de7332011-03-08 14:14:00 +01003902
Wang Shilong3cb09292013-12-04 21:15:19 +08003903 /*
3904	 * by checking @scrub_pause_req here, we can avoid a
3905	 * race between transaction commit and scrubbing.
3906 */
Wang Shilongcb7ab022013-12-04 21:16:53 +08003907 __scrub_blocked_if_needed(fs_info);
Arne Jansena2de7332011-03-08 14:14:00 +01003908 atomic_inc(&fs_info->scrubs_running);
3909 mutex_unlock(&fs_info->scrub_lock);
Arne Jansena2de7332011-03-08 14:14:00 +01003910
Stefan Behrensff023aa2012-11-06 11:43:11 +01003911 if (!is_dev_replace) {
Wang Shilong9b011ad2013-10-25 19:12:02 +08003912 /*
3913		 * by holding the device list mutex, we avoid racing
3914		 * with the super block writes kicked off by log tree sync.
3915 */
		mutex_lock(&fs_info->fs_devices->device_list_mutex);
		ret = scrub_supers(sctx, dev);
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
	}

	if (!ret)
		ret = scrub_enumerate_chunks(sctx, dev, start, end,
					     is_dev_replace);

	wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);
	atomic_dec(&fs_info->scrubs_running);
	wake_up(&fs_info->scrub_pause_wait);

	wait_event(sctx->list_wait, atomic_read(&sctx->workers_pending) == 0);

	if (progress)
		memcpy(progress, &sctx->stat, sizeof(*progress));

	mutex_lock(&fs_info->scrub_lock);
	dev->scrub_device = NULL;
	scrub_workers_put(fs_info);
	mutex_unlock(&fs_info->scrub_lock);

	scrub_put_ctx(sctx);

	return ret;
}

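/*
 * Pause all running scrubs: raise scrub_pause_req and wait until every
 * scrub has parked itself (scrubs_paused == scrubs_running). The
 * counterpart that resumes them is btrfs_scrub_continue().
 */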
void btrfs_scrub_pause(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	mutex_lock(&fs_info->scrub_lock);
	atomic_inc(&fs_info->scrub_pause_req);
	while (atomic_read(&fs_info->scrubs_paused) !=
	       atomic_read(&fs_info->scrubs_running)) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
			   atomic_read(&fs_info->scrubs_paused) ==
			   atomic_read(&fs_info->scrubs_running));
		mutex_lock(&fs_info->scrub_lock);
	}
	mutex_unlock(&fs_info->scrub_lock);
}

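/* Drop one pause request and wake any scrub blocked in scrub_pause_wait. */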
void btrfs_scrub_continue(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	atomic_dec(&fs_info->scrub_pause_req);
	wake_up(&fs_info->scrub_pause_wait);
}

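/*
 * Cancel all scrubs on this filesystem: raise scrub_cancel_req and wait
 * until scrubs_running drops to zero. Returns -ENOTCONN if no scrub was
 * running in the first place.
 */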
int btrfs_scrub_cancel(struct btrfs_fs_info *fs_info)
{
	mutex_lock(&fs_info->scrub_lock);
	if (!atomic_read(&fs_info->scrubs_running)) {
		mutex_unlock(&fs_info->scrub_lock);
		return -ENOTCONN;
	}

	atomic_inc(&fs_info->scrub_cancel_req);
	while (atomic_read(&fs_info->scrubs_running)) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
			   atomic_read(&fs_info->scrubs_running) == 0);
		mutex_lock(&fs_info->scrub_lock);
	}
	atomic_dec(&fs_info->scrub_cancel_req);
	mutex_unlock(&fs_info->scrub_lock);

	return 0;
}

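/*
 * Cancel the scrub running on a single device; blocks until
 * btrfs_scrub_dev() has cleared the device's scrub_device pointer.
 */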
int btrfs_scrub_cancel_dev(struct btrfs_fs_info *fs_info,
			   struct btrfs_device *dev)
{
	struct scrub_ctx *sctx;

	mutex_lock(&fs_info->scrub_lock);
	sctx = dev->scrub_device;
	if (!sctx) {
		mutex_unlock(&fs_info->scrub_lock);
		return -ENOTCONN;
	}
	atomic_inc(&sctx->cancel_req);
	while (dev->scrub_device) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
			   dev->scrub_device == NULL);
		mutex_lock(&fs_info->scrub_lock);
	}
	mutex_unlock(&fs_info->scrub_lock);

	return 0;
}

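/*
 * Copy the live statistics of the scrub running on @devid into
 * @progress. Returns -ENODEV if the device is unknown and -ENOTCONN if
 * it is not being scrubbed; a caller such as the scrub-progress ioctl
 * handler can poll this periodically.
 */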
int btrfs_scrub_progress(struct btrfs_root *root, u64 devid,
			 struct btrfs_scrub_progress *progress)
{
	struct btrfs_device *dev;
	struct scrub_ctx *sctx = NULL;

	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
	dev = btrfs_find_device(root->fs_info, devid, NULL, NULL);
	if (dev)
		sctx = dev->scrub_device;
	if (sctx)
		memcpy(progress, &sctx->stat, sizeof(*progress));
	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);

	return dev ? (sctx ? 0 : -ENOTCONN) : -ENODEV;
}

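/*
 * Map a logical extent to the physical offset, device and mirror number
 * of its first stripe. On any mapping failure the out parameters are
 * left untouched, so callers keep the values they preloaded.
 */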
static void scrub_remap_extent(struct btrfs_fs_info *fs_info,
			       u64 extent_logical, u64 extent_len,
			       u64 *extent_physical,
			       struct btrfs_device **extent_dev,
			       int *extent_mirror_num)
{
	u64 mapped_length;
	struct btrfs_bio *bbio = NULL;
	int ret;

	mapped_length = extent_len;
	ret = btrfs_map_block(fs_info, READ, extent_logical,
			      &mapped_length, &bbio, 0);
	if (ret || !bbio || mapped_length < extent_len ||
	    !bbio->stripes[0].dev->bdev) {
		btrfs_put_bbio(bbio);
		return;
	}

	*extent_physical = bbio->stripes[0].physical;
	*extent_mirror_num = bbio->mirror_num;
	*extent_dev = bbio->stripes[0].dev;
	btrfs_put_bbio(bbio);
}

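/*
 * Initialize the write context used for dev-replace. For a plain scrub
 * (!is_dev_replace) only the lock is set up and no target device is
 * attached, so the write path stays unused.
 */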
static int scrub_setup_wr_ctx(struct scrub_ctx *sctx,
			      struct scrub_wr_ctx *wr_ctx,
			      struct btrfs_fs_info *fs_info,
			      struct btrfs_device *dev,
			      int is_dev_replace)
{
	WARN_ON(wr_ctx->wr_curr_bio != NULL);

	mutex_init(&wr_ctx->wr_lock);
	wr_ctx->wr_curr_bio = NULL;
	if (!is_dev_replace)
		return 0;

	WARN_ON(!dev->bdev);
	wr_ctx->pages_per_wr_bio = min_t(int, SCRUB_PAGES_PER_WR_BIO,
					 bio_get_nr_vecs(dev->bdev));
	wr_ctx->tgtdev = dev;
	atomic_set(&wr_ctx->flush_all_writes, 0);
	return 0;
}

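/* Tear down the write context, dropping a partially filled write bio, if any. */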
static void scrub_free_wr_ctx(struct scrub_wr_ctx *wr_ctx)
{
	mutex_lock(&wr_ctx->wr_lock);
	kfree(wr_ctx->wr_curr_bio);
	wr_ctx->wr_curr_bio = NULL;
	mutex_unlock(&wr_ctx->wr_lock);
}

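/*
 * Queue a deferred copy of a NOCOW extent to the dev-replace target.
 * The real work happens in copy_nocow_pages_worker() on the
 * scrub_nocow_workers queue, because it may need to join a transaction.
 */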
static int copy_nocow_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
			    int mirror_num, u64 physical_for_dev_replace)
{
	struct scrub_copy_nocow_ctx *nocow_ctx;
	struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;

	nocow_ctx = kzalloc(sizeof(*nocow_ctx), GFP_NOFS);
	if (!nocow_ctx) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.malloc_errors++;
		spin_unlock(&sctx->stat_lock);
		return -ENOMEM;
	}

	scrub_pending_trans_workers_inc(sctx);

	nocow_ctx->sctx = sctx;
	nocow_ctx->logical = logical;
	nocow_ctx->len = len;
	nocow_ctx->mirror_num = mirror_num;
	nocow_ctx->physical_for_dev_replace = physical_for_dev_replace;
	btrfs_init_work(&nocow_ctx->work, btrfs_scrubnc_helper,
			copy_nocow_pages_worker, NULL, NULL);
	INIT_LIST_HEAD(&nocow_ctx->inodes);
	btrfs_queue_work(fs_info->scrub_nocow_workers,
			 &nocow_ctx->work);

	return 0;
}

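/*
 * Callback for the backref walk in copy_nocow_pages_worker(): record
 * each (inode, offset, root) triple that references the extent so the
 * worker can copy the pages inode by inode.
 */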
static int record_inode_for_nocow(u64 inum, u64 offset, u64 root, void *ctx)
{
	struct scrub_copy_nocow_ctx *nocow_ctx = ctx;
	struct scrub_nocow_inode *nocow_inode;

	nocow_inode = kzalloc(sizeof(*nocow_inode), GFP_NOFS);
	if (!nocow_inode)
		return -ENOMEM;
	nocow_inode->inum = inum;
	nocow_inode->offset = offset;
	nocow_inode->root = root;
	list_add_tail(&nocow_inode->list, &nocow_ctx->inodes);
	return 0;
}

#define COPY_COMPLETE 1

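/*
 * Worker for copy_nocow_pages(): resolve every inode that references
 * the logical extent, then copy the referenced pages through the page
 * cache to the replace target. A failure is accounted as an
 * uncorrectable read error of the dev-replace operation.
 */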
static void copy_nocow_pages_worker(struct btrfs_work *work)
{
	struct scrub_copy_nocow_ctx *nocow_ctx =
		container_of(work, struct scrub_copy_nocow_ctx, work);
	struct scrub_ctx *sctx = nocow_ctx->sctx;
	u64 logical = nocow_ctx->logical;
	u64 len = nocow_ctx->len;
	int mirror_num = nocow_ctx->mirror_num;
	u64 physical_for_dev_replace = nocow_ctx->physical_for_dev_replace;
	int ret;
	struct btrfs_trans_handle *trans = NULL;
	struct btrfs_fs_info *fs_info;
	struct btrfs_path *path;
	struct btrfs_root *root;
	int not_written = 0;

	fs_info = sctx->dev_root->fs_info;
	root = fs_info->extent_root;

	path = btrfs_alloc_path();
	if (!path) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.malloc_errors++;
		spin_unlock(&sctx->stat_lock);
		not_written = 1;
		goto out;
	}

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans)) {
		not_written = 1;
		goto out;
	}

	ret = iterate_inodes_from_logical(logical, fs_info, path,
					  record_inode_for_nocow, nocow_ctx);
	if (ret != 0 && ret != -ENOENT) {
		btrfs_warn(fs_info, "iterate_inodes_from_logical() failed: log %llu, "
			"phys %llu, len %llu, mir %u, ret %d",
			logical, physical_for_dev_replace, len, mirror_num,
			ret);
		not_written = 1;
		goto out;
	}

	btrfs_end_transaction(trans, root);
	trans = NULL;
	while (!list_empty(&nocow_ctx->inodes)) {
		struct scrub_nocow_inode *entry;
		entry = list_first_entry(&nocow_ctx->inodes,
					 struct scrub_nocow_inode,
					 list);
		list_del_init(&entry->list);
		ret = copy_nocow_pages_for_inode(entry->inum, entry->offset,
						 entry->root, nocow_ctx);
		kfree(entry);
		if (ret == COPY_COMPLETE) {
			ret = 0;
			break;
		} else if (ret) {
			break;
		}
	}
out:
	while (!list_empty(&nocow_ctx->inodes)) {
		struct scrub_nocow_inode *entry;
		entry = list_first_entry(&nocow_ctx->inodes,
					 struct scrub_nocow_inode,
					 list);
		list_del_init(&entry->list);
		kfree(entry);
	}
	if (trans && !IS_ERR(trans))
		btrfs_end_transaction(trans, root);
	if (not_written)
		btrfs_dev_replace_stats_inc(&fs_info->dev_replace.
					    num_uncorrectable_read_errors);

	btrfs_free_path(path);
	kfree(nocow_ctx);

	scrub_pending_trans_workers_dec(sctx);
}

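/*
 * Verify that the file range still maps to the logical extent being
 * copied. Returns 1 when an ordered extent is pending or the mapping
 * has moved (caller should skip), a negative errno on error, and 0 when
 * the copy may proceed.
 */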
static int check_extent_to_block(struct inode *inode, u64 start, u64 len,
				 u64 logical)
{
	struct extent_state *cached_state = NULL;
	struct btrfs_ordered_extent *ordered;
	struct extent_io_tree *io_tree;
	struct extent_map *em;
	u64 lockstart = start, lockend = start + len - 1;
	int ret = 0;

	io_tree = &BTRFS_I(inode)->io_tree;

	lock_extent_bits(io_tree, lockstart, lockend, 0, &cached_state);
	ordered = btrfs_lookup_ordered_range(inode, lockstart, len);
	if (ordered) {
		btrfs_put_ordered_extent(ordered);
		ret = 1;
		goto out_unlock;
	}

	em = btrfs_get_extent(inode, NULL, 0, start, len, 0);
	if (IS_ERR(em)) {
		ret = PTR_ERR(em);
		goto out_unlock;
	}

	/*
	 * This extent does not actually cover the logical extent anymore,
	 * move on to the next inode.
	 */
	if (em->block_start > logical ||
	    em->block_start + em->block_len < logical + len) {
		free_extent_map(em);
		ret = 1;
		goto out_unlock;
	}
	free_extent_map(em);

out_unlock:
	unlock_extent_cached(io_tree, lockstart, lockend, &cached_state,
			     GFP_NOFS);
	return ret;
}

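/*
 * Copy one inode's pages in the given range to the replace target,
 * going through the page cache. i_mutex is held and in-flight direct
 * I/O is drained to keep the pages stable, and the extent mapping is
 * re-checked before each page because writes can race with us.
 */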
static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root,
				      struct scrub_copy_nocow_ctx *nocow_ctx)
{
	struct btrfs_fs_info *fs_info = nocow_ctx->sctx->dev_root->fs_info;
	struct btrfs_key key;
	struct inode *inode;
	struct page *page;
	struct btrfs_root *local_root;
	struct extent_io_tree *io_tree;
	u64 physical_for_dev_replace;
	u64 nocow_ctx_logical;
	u64 len = nocow_ctx->len;
	unsigned long index;
	int srcu_index;
	int ret = 0;
	int err = 0;

	key.objectid = root;
	key.type = BTRFS_ROOT_ITEM_KEY;
	key.offset = (u64)-1;

	srcu_index = srcu_read_lock(&fs_info->subvol_srcu);

	local_root = btrfs_read_fs_root_no_name(fs_info, &key);
	if (IS_ERR(local_root)) {
		srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
		return PTR_ERR(local_root);
	}

	key.type = BTRFS_INODE_ITEM_KEY;
	key.objectid = inum;
	key.offset = 0;
	inode = btrfs_iget(fs_info->sb, &key, local_root, NULL);
	srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	/* Avoid races with truncate, direct I/O and hole punching. */
	mutex_lock(&inode->i_mutex);
	inode_dio_wait(inode);

	physical_for_dev_replace = nocow_ctx->physical_for_dev_replace;
	io_tree = &BTRFS_I(inode)->io_tree;
	nocow_ctx_logical = nocow_ctx->logical;

	ret = check_extent_to_block(inode, offset, len, nocow_ctx_logical);
	if (ret) {
		ret = ret > 0 ? 0 : ret;
		goto out;
	}

	while (len >= PAGE_CACHE_SIZE) {
		index = offset >> PAGE_CACHE_SHIFT;
again:
		page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
		if (!page) {
			btrfs_err(fs_info, "find_or_create_page() failed");
			ret = -ENOMEM;
			goto out;
		}

		if (PageUptodate(page)) {
			if (PageDirty(page))
				goto next_page;
		} else {
			ClearPageError(page);
			err = extent_read_full_page(io_tree, page,
						    btrfs_get_extent,
						    nocow_ctx->mirror_num);
			if (err) {
				ret = err;
				goto next_page;
			}

			lock_page(page);
			/*
			 * If the page has been removed from the page
			 * cache, the data on it is meaningless: it may
			 * be stale, and the new data may have been
			 * written into a fresh page in the page cache.
			 */
			if (page->mapping != inode->i_mapping) {
				unlock_page(page);
				page_cache_release(page);
				goto again;
			}
			if (!PageUptodate(page)) {
				ret = -EIO;
				goto next_page;
			}
		}

		ret = check_extent_to_block(inode, offset, len,
					    nocow_ctx_logical);
		if (ret) {
			ret = ret > 0 ? 0 : ret;
			goto next_page;
		}

		err = write_page_nocow(nocow_ctx->sctx,
				       physical_for_dev_replace, page);
		if (err)
			ret = err;
next_page:
		unlock_page(page);
		page_cache_release(page);

		if (ret)
			break;

		offset += PAGE_CACHE_SIZE;
		physical_for_dev_replace += PAGE_CACHE_SIZE;
		nocow_ctx_logical += PAGE_CACHE_SIZE;
		len -= PAGE_CACHE_SIZE;
	}
	ret = COPY_COMPLETE;
out:
	mutex_unlock(&inode->i_mutex);
	iput(inode);
	return ret;
}

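/*
 * Synchronously write a single page to @physical_for_dev_replace on the
 * dev-replace target device, bypassing the normal write paths.
 */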
static int write_page_nocow(struct scrub_ctx *sctx,
			    u64 physical_for_dev_replace, struct page *page)
{
	struct bio *bio;
	struct btrfs_device *dev;
	int ret;

	dev = sctx->wr_ctx.tgtdev;
	if (!dev)
		return -EIO;
	if (!dev->bdev) {
		printk_ratelimited(KERN_WARNING
			"BTRFS: scrub write_page_nocow(bdev == NULL) is unexpected!\n");
		return -EIO;
	}
	bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
	if (!bio) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.malloc_errors++;
		spin_unlock(&sctx->stat_lock);
		return -ENOMEM;
	}
	bio->bi_iter.bi_size = 0;
	bio->bi_iter.bi_sector = physical_for_dev_replace >> 9;
	bio->bi_bdev = dev->bdev;
	ret = bio_add_page(bio, page, PAGE_CACHE_SIZE, 0);
	if (ret != PAGE_CACHE_SIZE) {
leave_with_eio:
		bio_put(bio);
		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS);
		return -EIO;
	}

	if (btrfsic_submit_bio_wait(WRITE_SYNC, bio))
		goto leave_with_eio;

	bio_put(bio);
	return 0;
}