/*
 * raid10.c : Multiple Devices driver for Linux
 *
 * Copyright (C) 2000-2004 Neil Brown
 *
 * RAID-10 support for md.
 *
 * Based on code in raid1.c.  See raid1.c for further copyright information.
 *
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * You should have received a copy of the GNU General Public License
 * (for example /usr/src/linux/COPYING); if not, write to the Free
 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/ratelimit.h>
#include <linux/kthread.h>
#include <linux/raid/md_p.h>
#include <trace/events/block.h>
#include "md.h"
#include "raid10.h"
#include "raid0.h"
#include "md-bitmap.h"

/*
 * RAID10 provides a combination of RAID0 and RAID1 functionality.
 * The layout of data is defined by
 *    chunk_size
 *    raid_disks
 *    near_copies (stored in low byte of layout)
 *    far_copies (stored in second byte of layout)
 *    far_offset (stored in bit 16 of layout )
 *    use_far_sets (stored in bit 17 of layout )
 *    use_far_sets_bugfixed (stored in bit 18 of layout )
 *
 * The data to be stored is divided into chunks using chunksize.  Each device
 * is divided into far_copies sections.  In each section, chunks are laid out
 * in a style similar to raid0, but near_copies copies of each chunk are stored
 * (each on a different drive).  The starting device for each section is offset
 * near_copies from the starting device of the previous section.  Thus there
 * are (near_copies * far_copies) of each chunk, and each is on a different
 * drive.  near_copies and far_copies must be at least one, and their product
 * is at most raid_disks.
 *
 * If far_offset is true, then the far_copies are handled a bit differently.
 * The copies are still in different stripes, but instead of being very far
 * apart on disk, they are in adjacent stripes.
 *
 * The far and offset algorithms are handled slightly differently if
 * 'use_far_sets' is true.  In this case, the array's devices are grouped into
 * sets that are (near_copies * far_copies) in size.  The far copied stripes
 * are still shifted by 'near_copies' devices, but this shifting stays confined
 * to the set rather than the entire array.  This is done to improve the number
 * of device combinations that can fail without causing the array to fail.
 * Example 'far' algorithm w/o 'use_far_sets' (each letter represents a chunk
 * on a device):
 *    A B C D    A B C D E
 *      ...         ...
 *    D A B C    E A B C D
 * Example 'far' algorithm w/ 'use_far_sets' enabled (sets illustrated w/ []'s):
 *    [A B] [C D]    [A B] [C D E]
 *    |...| |...|    |...| | ... |
 *    [B A] [D C]    [B A] [E C D]
 */
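
/*
 * Illustrative example: with raid_disks=4, near_copies=2, far_copies=1
 * (the layout mdadm calls 'n2'), chunk 0 is stored on devices 0 and 1,
 * chunk 1 on devices 2 and 3, chunk 2 again on devices 0 and 1 one stripe
 * further down, and so on.  With raid_disks=4, near_copies=1, far_copies=2
 * ('f2'), the first section of every device holds a raid0 layout of all
 * the data and the second section repeats it shifted by one device, as in
 * the 'far' diagram above.
 */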

/*
 * Number of guaranteed r10bios in case of extreme VM load:
 */
#define NR_RAID10_BIOS 256

/* when we get a read error on a read-only array, we redirect to another
 * device without failing the first device, or trying to over-write to
 * correct the read error.  To keep track of bad blocks on a per-bio
 * level, we store IO_BLOCKED in the appropriate 'bios' pointer
 */
#define IO_BLOCKED ((struct bio *)1)
/* When we successfully write to a known bad-block, we need to remove the
 * bad-block marking which must be done from process context.  So we record
 * the success by setting devs[n].bio to IO_MADE_GOOD
 */
#define IO_MADE_GOOD ((struct bio *)2)

#define BIO_SPECIAL(bio) ((unsigned long)bio <= 2)
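/* BIO_SPECIAL() is true for NULL, IO_BLOCKED and IO_MADE_GOOD, i.e. for any
 * 'bios' slot value that is not a pointer to a real bio and so must never
 * be passed to bio_put().
 */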

/* When there are this many requests queued to be written by
 * the raid10 thread, we become 'congested' to provide back-pressure
 * for writeback.
 */
static int max_queued_requests = 1024;

static void allow_barrier(struct r10conf *conf);
static void lower_barrier(struct r10conf *conf);
static int _enough(struct r10conf *conf, int previous, int ignore);
static int enough(struct r10conf *conf, int ignore);
static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
                                int *skipped);
static void reshape_request_write(struct mddev *mddev, struct r10bio *r10_bio);
static void end_reshape_write(struct bio *bio);
static void end_reshape(struct r10conf *conf);

#define raid10_log(md, fmt, args...) \
        do { if ((md)->queue) blk_add_trace_msg((md)->queue, "raid10 " fmt, ##args); } while (0)

#include "raid1-10.c"

/*
 * for resync bio, r10bio pointer can be retrieved from the per-bio
 * 'struct resync_pages'.
 */
static inline struct r10bio *get_resync_r10bio(struct bio *bio)
{
        return get_resync_pages(bio)->raid_bio;
}

static void * r10bio_pool_alloc(gfp_t gfp_flags, void *data)
{
        struct r10conf *conf = data;
        int size = offsetof(struct r10bio, devs[conf->copies]);

        /* allocate a r10bio with room for raid_disks entries in the
         * bios array */
        return kzalloc(size, gfp_flags);
}

static void r10bio_pool_free(void *r10_bio, void *data)
{
        kfree(r10_bio);
}

#define RESYNC_SECTORS (RESYNC_BLOCK_SIZE >> 9)
/* amount of memory to reserve for resync requests */
#define RESYNC_WINDOW (1024*1024)
/* maximum number of concurrent requests, memory permitting */
#define RESYNC_DEPTH (32*1024*1024/RESYNC_BLOCK_SIZE)
#define CLUSTER_RESYNC_WINDOW (32 * RESYNC_WINDOW)
#define CLUSTER_RESYNC_WINDOW_SECTORS (CLUSTER_RESYNC_WINDOW >> 9)
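/* With the 64KiB RESYNC_BLOCK_SIZE defined in raid1-10.c, the above works out
 * to RESYNC_SECTORS == 128, up to RESYNC_DEPTH == 512 resync requests in
 * flight, and a 32MiB (65536 sector) window for clustered resync.
 */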

/*
 * When performing a resync, we need to read and compare, so
 * we need as many pages as there are copies.
 * When performing a recovery, we need 2 bios, one for read,
 * one for write (we recover only one drive per r10buf)
 *
 */
static void * r10buf_pool_alloc(gfp_t gfp_flags, void *data)
{
        struct r10conf *conf = data;
        struct r10bio *r10_bio;
        struct bio *bio;
        int j;
        int nalloc, nalloc_rp;
        struct resync_pages *rps;

        r10_bio = r10bio_pool_alloc(gfp_flags, conf);
        if (!r10_bio)
                return NULL;

        if (test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery) ||
            test_bit(MD_RECOVERY_RESHAPE, &conf->mddev->recovery))
                nalloc = conf->copies; /* resync */
        else
                nalloc = 2; /* recovery */

        /* allocate once for all bios */
        if (!conf->have_replacement)
                nalloc_rp = nalloc;
        else
                nalloc_rp = nalloc * 2;
        rps = kmalloc_array(nalloc_rp, sizeof(struct resync_pages), gfp_flags);
        if (!rps)
                goto out_free_r10bio;

        /*
         * Allocate bios.
         */
        for (j = nalloc ; j-- ; ) {
                bio = bio_kmalloc(gfp_flags, RESYNC_PAGES);
                if (!bio)
                        goto out_free_bio;
                r10_bio->devs[j].bio = bio;
                if (!conf->have_replacement)
                        continue;
                bio = bio_kmalloc(gfp_flags, RESYNC_PAGES);
                if (!bio)
                        goto out_free_bio;
                r10_bio->devs[j].repl_bio = bio;
        }
        /*
         * Allocate RESYNC_PAGES data pages and attach them
         * where needed.
         */
        for (j = 0; j < nalloc; j++) {
                struct bio *rbio = r10_bio->devs[j].repl_bio;
                struct resync_pages *rp, *rp_repl;

                rp = &rps[j];
                if (rbio)
                        rp_repl = &rps[nalloc + j];

                bio = r10_bio->devs[j].bio;

                if (!j || test_bit(MD_RECOVERY_SYNC,
                                   &conf->mddev->recovery)) {
                        if (resync_alloc_pages(rp, gfp_flags))
                                goto out_free_pages;
                } else {
                        memcpy(rp, &rps[0], sizeof(*rp));
                        resync_get_all_pages(rp);
                }

                rp->raid_bio = r10_bio;
                bio->bi_private = rp;
                if (rbio) {
                        memcpy(rp_repl, rp, sizeof(*rp));
                        rbio->bi_private = rp_repl;
                }
        }

        return r10_bio;

out_free_pages:
        while (--j >= 0)
                resync_free_pages(&rps[j * 2]);

        j = 0;
out_free_bio:
        for ( ; j < nalloc; j++) {
                if (r10_bio->devs[j].bio)
                        bio_put(r10_bio->devs[j].bio);
                if (r10_bio->devs[j].repl_bio)
                        bio_put(r10_bio->devs[j].repl_bio);
        }
        kfree(rps);
out_free_r10bio:
        r10bio_pool_free(r10_bio, conf);
        return NULL;
}

static void r10buf_pool_free(void *__r10_bio, void *data)
{
        struct r10conf *conf = data;
        struct r10bio *r10bio = __r10_bio;
        int j;
        struct resync_pages *rp = NULL;

        for (j = conf->copies; j--; ) {
                struct bio *bio = r10bio->devs[j].bio;

                if (bio) {
                        rp = get_resync_pages(bio);
                        resync_free_pages(rp);
                        bio_put(bio);
                }

                bio = r10bio->devs[j].repl_bio;
                if (bio)
                        bio_put(bio);
        }

        /* resync pages array stored in the 1st bio's .bi_private */
        kfree(rp);

        r10bio_pool_free(r10bio, conf);
}

static void put_all_bios(struct r10conf *conf, struct r10bio *r10_bio)
{
        int i;

        for (i = 0; i < conf->copies; i++) {
                struct bio **bio = & r10_bio->devs[i].bio;
                if (!BIO_SPECIAL(*bio))
                        bio_put(*bio);
                *bio = NULL;
                bio = &r10_bio->devs[i].repl_bio;
                if (r10_bio->read_slot < 0 && !BIO_SPECIAL(*bio))
                        bio_put(*bio);
                *bio = NULL;
        }
}

static void free_r10bio(struct r10bio *r10_bio)
{
        struct r10conf *conf = r10_bio->mddev->private;

        put_all_bios(conf, r10_bio);
        mempool_free(r10_bio, &conf->r10bio_pool);
}

static void put_buf(struct r10bio *r10_bio)
{
        struct r10conf *conf = r10_bio->mddev->private;

        mempool_free(r10_bio, &conf->r10buf_pool);

        lower_barrier(conf);
}

static void reschedule_retry(struct r10bio *r10_bio)
{
        unsigned long flags;
        struct mddev *mddev = r10_bio->mddev;
        struct r10conf *conf = mddev->private;

        spin_lock_irqsave(&conf->device_lock, flags);
        list_add(&r10_bio->retry_list, &conf->retry_list);
        conf->nr_queued ++;
        spin_unlock_irqrestore(&conf->device_lock, flags);

        /* wake up frozen array... */
        wake_up(&conf->wait_barrier);

        md_wakeup_thread(mddev->thread);
}

/*
 * raid_end_bio_io() is called when we have finished servicing a mirrored
 * operation and are ready to return a success/failure code to the buffer
 * cache layer.
 */
static void raid_end_bio_io(struct r10bio *r10_bio)
{
        struct bio *bio = r10_bio->master_bio;
        struct r10conf *conf = r10_bio->mddev->private;

        if (!test_bit(R10BIO_Uptodate, &r10_bio->state))
                bio->bi_status = BLK_STS_IOERR;

        bio_endio(bio);
        /*
         * Wake up any possible resync thread that waits for the device
         * to go idle.
         */
        allow_barrier(conf);

        free_r10bio(r10_bio);
}

/*
 * Update disk head position estimator based on IRQ completion info.
 */
static inline void update_head_pos(int slot, struct r10bio *r10_bio)
{
        struct r10conf *conf = r10_bio->mddev->private;

        conf->mirrors[r10_bio->devs[slot].devnum].head_position =
                r10_bio->devs[slot].addr + (r10_bio->sectors);
}

/*
 * Find the disk number which triggered given bio
 */
static int find_bio_disk(struct r10conf *conf, struct r10bio *r10_bio,
                         struct bio *bio, int *slotp, int *replp)
{
        int slot;
        int repl = 0;

        for (slot = 0; slot < conf->copies; slot++) {
                if (r10_bio->devs[slot].bio == bio)
                        break;
                if (r10_bio->devs[slot].repl_bio == bio) {
                        repl = 1;
                        break;
                }
        }

        BUG_ON(slot == conf->copies);
        update_head_pos(slot, r10_bio);

        if (slotp)
                *slotp = slot;
        if (replp)
                *replp = repl;
        return r10_bio->devs[slot].devnum;
}

static void raid10_end_read_request(struct bio *bio)
{
        int uptodate = !bio->bi_status;
        struct r10bio *r10_bio = bio->bi_private;
        int slot;
        struct md_rdev *rdev;
        struct r10conf *conf = r10_bio->mddev->private;

        slot = r10_bio->read_slot;
        rdev = r10_bio->devs[slot].rdev;
        /*
         * this branch is our 'one mirror IO has finished' event handler:
         */
        update_head_pos(slot, r10_bio);

        if (uptodate) {
                /*
                 * Set R10BIO_Uptodate in our master bio, so that
                 * we will return a good error code to the higher
                 * levels even if IO on some other mirrored buffer fails.
                 *
                 * The 'master' represents the composite IO operation to
                 * user-side. So if something waits for IO, then it will
                 * wait for the 'master' bio.
                 */
                set_bit(R10BIO_Uptodate, &r10_bio->state);
        } else {
                /* If all other devices that store this block have
                 * failed, we want to return the error upwards rather
                 * than fail the last device.  Here we redefine
                 * "uptodate" to mean "Don't want to retry"
                 */
                if (!_enough(conf, test_bit(R10BIO_Previous, &r10_bio->state),
                             rdev->raid_disk))
                        uptodate = 1;
        }
        if (uptodate) {
                raid_end_bio_io(r10_bio);
                rdev_dec_pending(rdev, conf->mddev);
        } else {
                /*
                 * oops, read error - keep the refcount on the rdev
                 */
                char b[BDEVNAME_SIZE];
                pr_err_ratelimited("md/raid10:%s: %s: rescheduling sector %llu\n",
                                   mdname(conf->mddev),
                                   bdevname(rdev->bdev, b),
                                   (unsigned long long)r10_bio->sector);
                set_bit(R10BIO_ReadError, &r10_bio->state);
                reschedule_retry(r10_bio);
        }
}

static void close_write(struct r10bio *r10_bio)
{
        /* clear the bitmap if all writes complete successfully */
        md_bitmap_endwrite(r10_bio->mddev->bitmap, r10_bio->sector,
                           r10_bio->sectors,
                           !test_bit(R10BIO_Degraded, &r10_bio->state),
                           0);
        md_write_end(r10_bio->mddev);
}

static void one_write_done(struct r10bio *r10_bio)
{
        if (atomic_dec_and_test(&r10_bio->remaining)) {
                if (test_bit(R10BIO_WriteError, &r10_bio->state))
                        reschedule_retry(r10_bio);
                else {
                        close_write(r10_bio);
                        if (test_bit(R10BIO_MadeGood, &r10_bio->state))
                                reschedule_retry(r10_bio);
                        else
                                raid_end_bio_io(r10_bio);
                }
        }
}

static void raid10_end_write_request(struct bio *bio)
{
        struct r10bio *r10_bio = bio->bi_private;
        int dev;
        int dec_rdev = 1;
        struct r10conf *conf = r10_bio->mddev->private;
        int slot, repl;
        struct md_rdev *rdev = NULL;
        struct bio *to_put = NULL;
        bool discard_error;

        discard_error = bio->bi_status && bio_op(bio) == REQ_OP_DISCARD;

        dev = find_bio_disk(conf, r10_bio, bio, &slot, &repl);

        if (repl)
                rdev = conf->mirrors[dev].replacement;
        if (!rdev) {
                smp_rmb();
                repl = 0;
                rdev = conf->mirrors[dev].rdev;
        }
        /*
         * this branch is our 'one mirror IO has finished' event handler:
         */
        if (bio->bi_status && !discard_error) {
                if (repl)
                        /* Never record new bad blocks to replacement,
                         * just fail it.
                         */
                        md_error(rdev->mddev, rdev);
                else {
                        set_bit(WriteErrorSeen, &rdev->flags);
                        if (!test_and_set_bit(WantReplacement, &rdev->flags))
                                set_bit(MD_RECOVERY_NEEDED,
                                        &rdev->mddev->recovery);

                        dec_rdev = 0;
                        if (test_bit(FailFast, &rdev->flags) &&
                            (bio->bi_opf & MD_FAILFAST)) {
                                md_error(rdev->mddev, rdev);
                                if (!test_bit(Faulty, &rdev->flags))
                                        /* This is the only remaining device,
                                         * We need to retry the write without
                                         * FailFast
                                         */
                                        set_bit(R10BIO_WriteError, &r10_bio->state);
                                else {
                                        r10_bio->devs[slot].bio = NULL;
                                        to_put = bio;
                                        dec_rdev = 1;
                                }
                        } else
                                set_bit(R10BIO_WriteError, &r10_bio->state);
                }
        } else {
                /*
                 * Set R10BIO_Uptodate in our master bio, so that
                 * we will return a good error code to the higher
                 * levels even if IO on some other mirrored buffer fails.
                 *
                 * The 'master' represents the composite IO operation to
                 * user-side. So if something waits for IO, then it will
                 * wait for the 'master' bio.
                 */
                sector_t first_bad;
                int bad_sectors;

                /*
                 * Do not set R10BIO_Uptodate if the current device is
                 * rebuilding or Faulty. This is because we cannot use
                 * such a device for properly reading the data back (we could
                 * potentially use it, if the current write would have fallen
                 * before rdev->recovery_offset, but for simplicity we don't
                 * check this here.
                 */
                if (test_bit(In_sync, &rdev->flags) &&
                    !test_bit(Faulty, &rdev->flags))
                        set_bit(R10BIO_Uptodate, &r10_bio->state);

                /* Maybe we can clear some bad blocks. */
                if (is_badblock(rdev,
                                r10_bio->devs[slot].addr,
                                r10_bio->sectors,
                                &first_bad, &bad_sectors) && !discard_error) {
                        bio_put(bio);
                        if (repl)
                                r10_bio->devs[slot].repl_bio = IO_MADE_GOOD;
                        else
                                r10_bio->devs[slot].bio = IO_MADE_GOOD;
                        dec_rdev = 0;
                        set_bit(R10BIO_MadeGood, &r10_bio->state);
                }
        }

        /*
         * Let's see if all mirrored write operations have finished
         * already.
         */
        one_write_done(r10_bio);
        if (dec_rdev)
                rdev_dec_pending(rdev, conf->mddev);
        if (to_put)
                bio_put(to_put);
}
572
Linus Torvalds1da177e2005-04-16 15:20:36 -0700573/*
574 * RAID10 layout manager
Lucas De Marchi25985ed2011-03-30 22:57:33 -0300575 * As well as the chunksize and raid_disks count, there are two
Linus Torvalds1da177e2005-04-16 15:20:36 -0700576 * parameters: near_copies and far_copies.
577 * near_copies * far_copies must be <= raid_disks.
578 * Normally one of these will be 1.
579 * If both are 1, we get raid0.
580 * If near_copies == raid_disks, we get raid1.
581 *
Lucas De Marchi25985ed2011-03-30 22:57:33 -0300582 * Chunks are laid out in raid0 style with near_copies copies of the
Linus Torvalds1da177e2005-04-16 15:20:36 -0700583 * first chunk, followed by near_copies copies of the next chunk and
584 * so on.
585 * If far_copies > 1, then after 1/far_copies of the array has been assigned
586 * as described above, we start again with a device offset of near_copies.
587 * So we effectively have another copy of the whole array further down all
588 * the drives, but with blocks on different drives.
589 * With this layout, and block is never stored twice on the one device.
590 *
591 * raid10_find_phys finds the sector offset of a given virtual sector
NeilBrownc93983b2006-06-26 00:27:41 -0700592 * on each device that it is on.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700593 *
594 * raid10_find_virt does the reverse mapping, from a device and a
595 * sector offset to a virtual address
596 */

static void __raid10_find_phys(struct geom *geo, struct r10bio *r10bio)
{
        int n,f;
        sector_t sector;
        sector_t chunk;
        sector_t stripe;
        int dev;
        int slot = 0;
        int last_far_set_start, last_far_set_size;

        last_far_set_start = (geo->raid_disks / geo->far_set_size) - 1;
        last_far_set_start *= geo->far_set_size;

        last_far_set_size = geo->far_set_size;
        last_far_set_size += (geo->raid_disks % geo->far_set_size);

        /* now calculate first sector/dev */
        chunk = r10bio->sector >> geo->chunk_shift;
        sector = r10bio->sector & geo->chunk_mask;

        chunk *= geo->near_copies;
        stripe = chunk;
        dev = sector_div(stripe, geo->raid_disks);
        if (geo->far_offset)
                stripe *= geo->far_copies;

        sector += stripe << geo->chunk_shift;

        /* and calculate all the others */
        for (n = 0; n < geo->near_copies; n++) {
                int d = dev;
                int set;
                sector_t s = sector;
                r10bio->devs[slot].devnum = d;
                r10bio->devs[slot].addr = s;
                slot++;

                for (f = 1; f < geo->far_copies; f++) {
                        set = d / geo->far_set_size;
                        d += geo->near_copies;

                        if ((geo->raid_disks % geo->far_set_size) &&
                            (d > last_far_set_start)) {
                                d -= last_far_set_start;
                                d %= last_far_set_size;
                                d += last_far_set_start;
                        } else {
                                d %= geo->far_set_size;
                                d += geo->far_set_size * set;
                        }
                        s += geo->stride;
                        r10bio->devs[slot].devnum = d;
                        r10bio->devs[slot].addr = s;
                        slot++;
                }
                dev++;
                if (dev >= geo->raid_disks) {
                        dev = 0;
                        sector += (geo->chunk_mask + 1);
                }
        }
}

static void raid10_find_phys(struct r10conf *conf, struct r10bio *r10bio)
{
        struct geom *geo = &conf->geo;

        if (conf->reshape_progress != MaxSector &&
            ((r10bio->sector >= conf->reshape_progress) !=
             conf->mddev->reshape_backwards)) {
                set_bit(R10BIO_Previous, &r10bio->state);
                geo = &conf->prev;
        } else
                clear_bit(R10BIO_Previous, &r10bio->state);

        __raid10_find_phys(geo, r10bio);
}

static sector_t raid10_find_virt(struct r10conf *conf, sector_t sector, int dev)
{
        sector_t offset, chunk, vchunk;
        /* Never use conf->prev as this is only called during resync
         * or recovery, so reshape isn't happening
         */
        struct geom *geo = &conf->geo;
        int far_set_start = (dev / geo->far_set_size) * geo->far_set_size;
        int far_set_size = geo->far_set_size;
        int last_far_set_start;

        if (geo->raid_disks % geo->far_set_size) {
                last_far_set_start = (geo->raid_disks / geo->far_set_size) - 1;
                last_far_set_start *= geo->far_set_size;

                if (dev >= last_far_set_start) {
                        far_set_size = geo->far_set_size;
                        far_set_size += (geo->raid_disks % geo->far_set_size);
                        far_set_start = last_far_set_start;
                }
        }

        offset = sector & geo->chunk_mask;
        if (geo->far_offset) {
                int fc;
                chunk = sector >> geo->chunk_shift;
                fc = sector_div(chunk, geo->far_copies);
                dev -= fc * geo->near_copies;
                if (dev < far_set_start)
                        dev += far_set_size;
        } else {
                while (sector >= geo->stride) {
                        sector -= geo->stride;
                        if (dev < (geo->near_copies + far_set_start))
                                dev += far_set_size - geo->near_copies;
                        else
                                dev -= geo->near_copies;
                }
                chunk = sector >> geo->chunk_shift;
        }
        vchunk = chunk * geo->raid_disks + dev;
        sector_div(vchunk, geo->near_copies);
        return (vchunk << geo->chunk_shift) + offset;
}
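
/*
 * Worked example of the reverse mapping (illustrative): with raid_disks=4,
 * near_copies=2, far_copies=1, device 2 holds the second data chunk of each
 * stripe, so for chunk number c on device 2 we get vchunk = c * 4 + 2, and
 * after dividing by near_copies the virtual chunk is 2 * c + 1, which is
 * exactly the chunk that __raid10_find_phys() places on devices 2 and 3 of
 * stripe c.
 */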

/*
 * This routine returns the disk from which the requested read should
 * be done. There is a per-array 'next expected sequential IO' sector
 * number - if this matches on the next IO then we use the last disk.
 * There is also a per-disk 'last known head position' sector that is
 * maintained from IRQ contexts, both the normal and the resync IO
 * completion handlers update this position correctly. If there is no
 * perfect sequential match then we pick the disk whose head is closest.
 *
 * If there are 2 mirrors in the same 2 devices, performance degrades
 * because position is mirror, not device based.
 *
 * The rdev for the device selected will have nr_pending incremented.
 */

/*
 * FIXME: possibly should rethink readbalancing and do it differently
 * depending on near_copies / far_copies geometry.
 */
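/*
 * Summary of the selection policy below (descriptive only): the candidate
 * "distance" is 0 for an idle disk (no pending IO) when near_copies > 1,
 * the block address of the copy when far_copies > 1 (i.e. always prefer
 * the lowest address), and otherwise the seek distance from the last known
 * head position; the readable slot with the smallest distance wins.
 */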
static struct md_rdev *read_balance(struct r10conf *conf,
                                    struct r10bio *r10_bio,
                                    int *max_sectors)
{
        const sector_t this_sector = r10_bio->sector;
        int disk, slot;
        int sectors = r10_bio->sectors;
        int best_good_sectors;
        sector_t new_distance, best_dist;
        struct md_rdev *best_rdev, *rdev = NULL;
        int do_balance;
        int best_slot;
        struct geom *geo = &conf->geo;

        raid10_find_phys(conf, r10_bio);
        rcu_read_lock();
        best_slot = -1;
        best_rdev = NULL;
        best_dist = MaxSector;
        best_good_sectors = 0;
        do_balance = 1;
        clear_bit(R10BIO_FailFast, &r10_bio->state);
        /*
         * Check if we can balance. We can balance on the whole
         * device if no resync is going on (recovery is ok), or below
         * the resync window. We take the first readable disk when
         * above the resync window.
         */
        if ((conf->mddev->recovery_cp < MaxSector
             && (this_sector + sectors >= conf->next_resync)) ||
            (mddev_is_clustered(conf->mddev) &&
             md_cluster_ops->area_resyncing(conf->mddev, READ, this_sector,
                                            this_sector + sectors)))
                do_balance = 0;

        for (slot = 0; slot < conf->copies ; slot++) {
                sector_t first_bad;
                int bad_sectors;
                sector_t dev_sector;

                if (r10_bio->devs[slot].bio == IO_BLOCKED)
                        continue;
                disk = r10_bio->devs[slot].devnum;
                rdev = rcu_dereference(conf->mirrors[disk].replacement);
                if (rdev == NULL || test_bit(Faulty, &rdev->flags) ||
                    r10_bio->devs[slot].addr + sectors > rdev->recovery_offset)
                        rdev = rcu_dereference(conf->mirrors[disk].rdev);
                if (rdev == NULL ||
                    test_bit(Faulty, &rdev->flags))
                        continue;
                if (!test_bit(In_sync, &rdev->flags) &&
                    r10_bio->devs[slot].addr + sectors > rdev->recovery_offset)
                        continue;

                dev_sector = r10_bio->devs[slot].addr;
                if (is_badblock(rdev, dev_sector, sectors,
                                &first_bad, &bad_sectors)) {
                        if (best_dist < MaxSector)
                                /* Already have a better slot */
                                continue;
                        if (first_bad <= dev_sector) {
                                /* Cannot read here.  If this is the
                                 * 'primary' device, then we must not read
                                 * beyond 'bad_sectors' from another device.
                                 */
                                bad_sectors -= (dev_sector - first_bad);
                                if (!do_balance && sectors > bad_sectors)
                                        sectors = bad_sectors;
                                if (best_good_sectors > sectors)
                                        best_good_sectors = sectors;
                        } else {
                                sector_t good_sectors =
                                        first_bad - dev_sector;
                                if (good_sectors > best_good_sectors) {
                                        best_good_sectors = good_sectors;
                                        best_slot = slot;
                                        best_rdev = rdev;
                                }
                                if (!do_balance)
                                        /* Must read from here */
                                        break;
                        }
                        continue;
                } else
                        best_good_sectors = sectors;

                if (!do_balance)
                        break;

                if (best_slot >= 0)
                        /* At least 2 disks to choose from so failfast is OK */
                        set_bit(R10BIO_FailFast, &r10_bio->state);
                /* This optimisation is debatable, and completely destroys
                 * sequential read speed for 'far copies' arrays.  So only
                 * keep it for 'near' arrays, and review those later.
                 */
                if (geo->near_copies > 1 && !atomic_read(&rdev->nr_pending))
                        new_distance = 0;

                /* for far > 1 always use the lowest address */
                else if (geo->far_copies > 1)
                        new_distance = r10_bio->devs[slot].addr;
                else
                        new_distance = abs(r10_bio->devs[slot].addr -
                                           conf->mirrors[disk].head_position);
                if (new_distance < best_dist) {
                        best_dist = new_distance;
                        best_slot = slot;
                        best_rdev = rdev;
                }
        }
        if (slot >= conf->copies) {
                slot = best_slot;
                rdev = best_rdev;
        }

        if (slot >= 0) {
                atomic_inc(&rdev->nr_pending);
                r10_bio->read_slot = slot;
        } else
                rdev = NULL;
        rcu_read_unlock();
        *max_sectors = best_good_sectors;

        return rdev;
}

static int raid10_congested(struct mddev *mddev, int bits)
{
        struct r10conf *conf = mddev->private;
        int i, ret = 0;

        if ((bits & (1 << WB_async_congested)) &&
            conf->pending_count >= max_queued_requests)
                return 1;

        rcu_read_lock();
        for (i = 0;
             (i < conf->geo.raid_disks || i < conf->prev.raid_disks)
                     && ret == 0;
             i++) {
                struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
                if (rdev && !test_bit(Faulty, &rdev->flags)) {
                        struct request_queue *q = bdev_get_queue(rdev->bdev);

                        ret |= bdi_congested(q->backing_dev_info, bits);
                }
        }
        rcu_read_unlock();
        return ret;
}

static void flush_pending_writes(struct r10conf *conf)
{
        /* Any writes that have been queued but are awaiting
         * bitmap updates get flushed here.
         */
        spin_lock_irq(&conf->device_lock);

        if (conf->pending_bio_list.head) {
                struct blk_plug plug;
                struct bio *bio;

                bio = bio_list_get(&conf->pending_bio_list);
                conf->pending_count = 0;
                spin_unlock_irq(&conf->device_lock);

                /*
                 * As this is called in a wait_event() loop (see freeze_array),
                 * current->state might be TASK_UNINTERRUPTIBLE which will
                 * cause a warning when we prepare to wait again.  As it is
                 * rare that this path is taken, it is perfectly safe to force
                 * us to go around the wait_event() loop again, so the warning
                 * is a false-positive. Silence the warning by resetting
                 * thread state
                 */
                __set_current_state(TASK_RUNNING);

                blk_start_plug(&plug);
                /* flush any pending bitmap writes to disk
                 * before proceeding w/ I/O */
                md_bitmap_unplug(conf->mddev->bitmap);
                wake_up(&conf->wait_barrier);

                while (bio) { /* submit pending writes */
                        struct bio *next = bio->bi_next;
                        struct md_rdev *rdev = (void*)bio->bi_disk;
                        bio->bi_next = NULL;
                        bio_set_dev(bio, rdev->bdev);
                        if (test_bit(Faulty, &rdev->flags)) {
                                bio_io_error(bio);
                        } else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
                                            !blk_queue_discard(bio->bi_disk->queue)))
                                /* Just ignore it */
                                bio_endio(bio);
                        else
                                generic_make_request(bio);
                        bio = next;
                }
                blk_finish_plug(&plug);
        } else
                spin_unlock_irq(&conf->device_lock);
}

/* Barriers....
 * Sometimes we need to suspend IO while we do something else,
 * either some resync/recovery, or reconfigure the array.
 * To do this we raise a 'barrier'.
 * The 'barrier' is a counter that can be raised multiple times
 * to count how many activities are happening which preclude
 * normal IO.
 * We can only raise the barrier if there is no pending IO.
 * i.e. if nr_pending == 0.
 * We choose only to raise the barrier if no-one is waiting for the
 * barrier to go down.  This means that as soon as an IO request
 * is ready, no other operations which require a barrier will start
 * until the IO request has had a chance.
 *
 * So: regular IO calls 'wait_barrier'.  When that returns there
 * is no background IO happening.  It must arrange to call
 * allow_barrier when it has finished its IO.
 * background IO calls must call raise_barrier.  Once that returns
 *    there is no normal IO happening.  It must arrange to call
 *    lower_barrier when the particular background IO completes.
 */
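
/*
 * Pairing overview (descriptive only): regular IO brackets each request with
 * wait_barrier()/allow_barrier() (see regular_request_wait() and
 * raid_end_bio_io()), resync/recovery brackets each window with
 * raise_barrier()/lower_barrier() (see put_buf()), and the error-handling
 * paths use freeze_array()/unfreeze_array() to quiesce the array completely.
 */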

static void raise_barrier(struct r10conf *conf, int force)
{
        BUG_ON(force && !conf->barrier);
        spin_lock_irq(&conf->resync_lock);

        /* Wait until no block IO is waiting (unless 'force') */
        wait_event_lock_irq(conf->wait_barrier, force || !conf->nr_waiting,
                            conf->resync_lock);

        /* block any new IO from starting */
        conf->barrier++;

        /* Now wait for all pending IO to complete */
        wait_event_lock_irq(conf->wait_barrier,
                            !atomic_read(&conf->nr_pending) && conf->barrier < RESYNC_DEPTH,
                            conf->resync_lock);

        spin_unlock_irq(&conf->resync_lock);
}

static void lower_barrier(struct r10conf *conf)
{
        unsigned long flags;
        spin_lock_irqsave(&conf->resync_lock, flags);
        conf->barrier--;
        spin_unlock_irqrestore(&conf->resync_lock, flags);
        wake_up(&conf->wait_barrier);
}

static void wait_barrier(struct r10conf *conf)
{
        spin_lock_irq(&conf->resync_lock);
        if (conf->barrier) {
                conf->nr_waiting++;
                /* Wait for the barrier to drop.
                 * However if there are already pending
                 * requests (preventing the barrier from
                 * rising completely), and the
                 * pre-process bio queue isn't empty,
                 * then don't wait, as we need to empty
                 * that queue to get the nr_pending
                 * count down.
                 */
                raid10_log(conf->mddev, "wait barrier");
                wait_event_lock_irq(conf->wait_barrier,
                                    !conf->barrier ||
                                    (atomic_read(&conf->nr_pending) &&
                                     current->bio_list &&
                                     (!bio_list_empty(&current->bio_list[0]) ||
                                      !bio_list_empty(&current->bio_list[1]))),
                                    conf->resync_lock);
                conf->nr_waiting--;
                if (!conf->nr_waiting)
                        wake_up(&conf->wait_barrier);
        }
        atomic_inc(&conf->nr_pending);
        spin_unlock_irq(&conf->resync_lock);
}

static void allow_barrier(struct r10conf *conf)
{
        if ((atomic_dec_and_test(&conf->nr_pending)) ||
                        (conf->array_freeze_pending))
                wake_up(&conf->wait_barrier);
}

static void freeze_array(struct r10conf *conf, int extra)
{
        /* stop syncio and normal IO and wait for everything to
         * go quiet.
         * We increment barrier and nr_waiting, and then
         * wait until nr_pending matches nr_queued+extra.
         * This is called in the context of one normal IO request
         * that has failed. Thus any sync request that might be pending
         * will be blocked by nr_pending, and we need to wait for
         * pending IO requests to complete or be queued for re-try.
         * Thus the number queued (nr_queued) plus this request (extra)
         * must match the number of pending IOs (nr_pending) before
         * we continue.
         */
        spin_lock_irq(&conf->resync_lock);
        conf->array_freeze_pending++;
        conf->barrier++;
        conf->nr_waiting++;
        wait_event_lock_irq_cmd(conf->wait_barrier,
                                atomic_read(&conf->nr_pending) == conf->nr_queued+extra,
                                conf->resync_lock,
                                flush_pending_writes(conf));

        conf->array_freeze_pending--;
        spin_unlock_irq(&conf->resync_lock);
}
1058
NeilBrowne879a872011-10-11 16:49:02 +11001059static void unfreeze_array(struct r10conf *conf)
NeilBrown4443ae12006-01-06 00:20:28 -08001060{
1061 /* reverse the effect of the freeze */
1062 spin_lock_irq(&conf->resync_lock);
1063 conf->barrier--;
1064 conf->nr_waiting--;
1065 wake_up(&conf->wait_barrier);
1066 spin_unlock_irq(&conf->resync_lock);
1067}
1068
NeilBrownf8c9e742012-05-21 09:28:33 +10001069static sector_t choose_data_offset(struct r10bio *r10_bio,
1070 struct md_rdev *rdev)
1071{
1072 if (!test_bit(MD_RECOVERY_RESHAPE, &rdev->mddev->recovery) ||
1073 test_bit(R10BIO_Previous, &r10_bio->state))
1074 return rdev->data_offset;
1075 else
1076 return rdev->new_data_offset;
1077}
1078
NeilBrown57c67df2012-10-11 13:32:13 +11001079struct raid10_plug_cb {
1080 struct blk_plug_cb cb;
1081 struct bio_list pending;
1082 int pending_cnt;
1083};
1084
1085static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule)
1086{
1087 struct raid10_plug_cb *plug = container_of(cb, struct raid10_plug_cb,
1088 cb);
1089 struct mddev *mddev = plug->cb.data;
1090 struct r10conf *conf = mddev->private;
1091 struct bio *bio;
1092
NeilBrown874807a2012-11-27 12:14:40 +11001093 if (from_schedule || current->bio_list) {
NeilBrown57c67df2012-10-11 13:32:13 +11001094 spin_lock_irq(&conf->device_lock);
1095 bio_list_merge(&conf->pending_bio_list, &plug->pending);
1096 conf->pending_count += plug->pending_cnt;
1097 spin_unlock_irq(&conf->device_lock);
NeilBrownee0b0242013-02-25 12:38:29 +11001098 wake_up(&conf->wait_barrier);
NeilBrown57c67df2012-10-11 13:32:13 +11001099 md_wakeup_thread(mddev->thread);
1100 kfree(plug);
1101 return;
1102 }
1103
1104 /* we aren't scheduling, so we can do the write-out directly. */
1105 bio = bio_list_get(&plug->pending);
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001106 md_bitmap_unplug(mddev->bitmap);
NeilBrown57c67df2012-10-11 13:32:13 +11001107 wake_up(&conf->wait_barrier);
1108
1109 while (bio) { /* submit pending writes */
1110 struct bio *next = bio->bi_next;
Christoph Hellwig74d46992017-08-23 19:10:32 +02001111 struct md_rdev *rdev = (void*)bio->bi_disk;
NeilBrown57c67df2012-10-11 13:32:13 +11001112 bio->bi_next = NULL;
Christoph Hellwig74d46992017-08-23 19:10:32 +02001113 bio_set_dev(bio, rdev->bdev);
NeilBrowna9ae93c2016-11-04 16:46:03 +11001114 if (test_bit(Faulty, &rdev->flags)) {
Guoqing Jiang6308d8e2017-07-21 16:33:44 +08001115 bio_io_error(bio);
NeilBrowna9ae93c2016-11-04 16:46:03 +11001116 } else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
Christoph Hellwig74d46992017-08-23 19:10:32 +02001117 !blk_queue_discard(bio->bi_disk->queue)))
Shaohua Li32f9f572013-04-28 18:26:38 +08001118 /* Just ignore it */
Christoph Hellwig4246a0b2015-07-20 15:29:37 +02001119 bio_endio(bio);
Shaohua Li32f9f572013-04-28 18:26:38 +08001120 else
1121 generic_make_request(bio);
NeilBrown57c67df2012-10-11 13:32:13 +11001122 bio = next;
1123 }
1124 kfree(plug);
1125}
1126
Guoqing Jiangcaea3c42018-12-07 18:24:21 +08001127/*
1128 * 1. Register the new request and wait if the reconstruction thread has put
1129 * up a bar for new requests. Continue immediately if no resync is active
1130 * currently.
1131 * 2. If IO spans the reshape position. Need to wait for reshape to pass.
1132 */
1133static void regular_request_wait(struct mddev *mddev, struct r10conf *conf,
1134 struct bio *bio, sector_t sectors)
1135{
1136 wait_barrier(conf);
1137 while (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
1138 bio->bi_iter.bi_sector < conf->reshape_progress &&
1139 bio->bi_iter.bi_sector + sectors > conf->reshape_progress) {
1140 raid10_log(conf->mddev, "wait reshape");
1141 allow_barrier(conf);
1142 wait_event(conf->wait_barrier,
1143 conf->reshape_progress <= bio->bi_iter.bi_sector ||
1144 conf->reshape_progress >= bio->bi_iter.bi_sector +
1145 sectors);
1146 wait_barrier(conf);
1147 }
1148}
1149
Robert LeBlancbb5f1ed2016-12-05 13:02:58 -07001150static void raid10_read_request(struct mddev *mddev, struct bio *bio,
1151 struct r10bio *r10_bio)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001152{
NeilBrowne879a872011-10-11 16:49:02 +11001153 struct r10conf *conf = mddev->private;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001154 struct bio *read_bio;
Robert LeBlancbb5f1ed2016-12-05 13:02:58 -07001155 const int op = bio_op(bio);
1156 const unsigned long do_sync = (bio->bi_opf & REQ_SYNC);
Robert LeBlancbb5f1ed2016-12-05 13:02:58 -07001157 int max_sectors;
Robert LeBlancbb5f1ed2016-12-05 13:02:58 -07001158 struct md_rdev *rdev;
NeilBrown545250f2017-04-05 14:05:51 +10001159 char b[BDEVNAME_SIZE];
1160 int slot = r10_bio->read_slot;
1161 struct md_rdev *err_rdev = NULL;
1162 gfp_t gfp = GFP_NOIO;
Robert LeBlancbb5f1ed2016-12-05 13:02:58 -07001163
NeilBrown545250f2017-04-05 14:05:51 +10001164 if (r10_bio->devs[slot].rdev) {
1165 /*
1166 * This is an error retry, but we cannot
1167 * safely dereference the rdev in the r10_bio,
 1168 * so we must use the one in conf.
1169 * If it has already been disconnected (unlikely)
1170 * we lose the device name in error messages.
1171 */
1172 int disk;
1173 /*
1174 * As we are blocking raid10, it is a little safer to
1175 * use __GFP_HIGH.
1176 */
1177 gfp = GFP_NOIO | __GFP_HIGH;
1178
1179 rcu_read_lock();
1180 disk = r10_bio->devs[slot].devnum;
1181 err_rdev = rcu_dereference(conf->mirrors[disk].rdev);
1182 if (err_rdev)
1183 bdevname(err_rdev->bdev, b);
1184 else {
1185 strcpy(b, "???");
1186 /* This never gets dereferenced */
1187 err_rdev = r10_bio->devs[slot].rdev;
1188 }
1189 rcu_read_unlock();
1190 }
Robert LeBlancbb5f1ed2016-12-05 13:02:58 -07001191
Guoqing Jiangcaea3c42018-12-07 18:24:21 +08001192 regular_request_wait(mddev, conf, bio, r10_bio->sectors);
Robert LeBlancbb5f1ed2016-12-05 13:02:58 -07001193 rdev = read_balance(conf, r10_bio, &max_sectors);
1194 if (!rdev) {
NeilBrown545250f2017-04-05 14:05:51 +10001195 if (err_rdev) {
1196 pr_crit_ratelimited("md/raid10:%s: %s: unrecoverable I/O read error for block %llu\n",
1197 mdname(mddev), b,
1198 (unsigned long long)r10_bio->sector);
1199 }
Robert LeBlancbb5f1ed2016-12-05 13:02:58 -07001200 raid_end_bio_io(r10_bio);
1201 return;
1202 }
NeilBrown545250f2017-04-05 14:05:51 +10001203 if (err_rdev)
1204 pr_err_ratelimited("md/raid10:%s: %s: redirecting sector %llu to another mirror\n",
1205 mdname(mddev),
1206 bdevname(rdev->bdev, b),
1207 (unsigned long long)r10_bio->sector);
NeilBrownfc9977d2017-04-05 14:05:51 +10001208 if (max_sectors < bio_sectors(bio)) {
1209 struct bio *split = bio_split(bio, max_sectors,
Kent Overstreetafeee512018-05-20 18:25:52 -04001210 gfp, &conf->bio_split);
NeilBrownfc9977d2017-04-05 14:05:51 +10001211 bio_chain(split, bio);
1212 generic_make_request(bio);
1213 bio = split;
1214 r10_bio->master_bio = bio;
1215 r10_bio->sectors = max_sectors;
1216 }
Robert LeBlancbb5f1ed2016-12-05 13:02:58 -07001217 slot = r10_bio->read_slot;
1218
Kent Overstreetafeee512018-05-20 18:25:52 -04001219 read_bio = bio_clone_fast(bio, gfp, &mddev->bio_set);
Robert LeBlancbb5f1ed2016-12-05 13:02:58 -07001220
1221 r10_bio->devs[slot].bio = read_bio;
1222 r10_bio->devs[slot].rdev = rdev;
1223
1224 read_bio->bi_iter.bi_sector = r10_bio->devs[slot].addr +
1225 choose_data_offset(r10_bio, rdev);
Christoph Hellwig74d46992017-08-23 19:10:32 +02001226 bio_set_dev(read_bio, rdev->bdev);
Robert LeBlancbb5f1ed2016-12-05 13:02:58 -07001227 read_bio->bi_end_io = raid10_end_read_request;
1228 bio_set_op_attrs(read_bio, op, do_sync);
1229 if (test_bit(FailFast, &rdev->flags) &&
1230 test_bit(R10BIO_FailFast, &r10_bio->state))
1231 read_bio->bi_opf |= MD_FAILFAST;
1232 read_bio->bi_private = r10_bio;
1233
1234 if (mddev->gendisk)
Christoph Hellwig74d46992017-08-23 19:10:32 +02001235 trace_block_bio_remap(read_bio->bi_disk->queue,
Robert LeBlancbb5f1ed2016-12-05 13:02:58 -07001236 read_bio, disk_devt(mddev->gendisk),
1237 r10_bio->sector);
NeilBrownfc9977d2017-04-05 14:05:51 +10001238 generic_make_request(read_bio);
Robert LeBlancbb5f1ed2016-12-05 13:02:58 -07001239 return;
1240}
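/*
 * Sketch of the split handled above (illustrative numbers): if the bio
 * covers 256 sectors but read_balance() can only serve 64 from the chosen
 * mirror (say a recorded bad block starts 64 sectors in), bio_split() peels
 * off that 64-sector head for this r10_bio; the 192-sector tail is chained
 * and resubmitted through generic_make_request(), re-entering
 * raid10_make_request() later as an independent request.
 */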
1241
Guoqing Jiang27f26a02017-03-20 17:46:04 +08001242static void raid10_write_one_disk(struct mddev *mddev, struct r10bio *r10_bio,
1243 struct bio *bio, bool replacement,
NeilBrownfc9977d2017-04-05 14:05:51 +10001244 int n_copy)
Guoqing Jiang27f26a02017-03-20 17:46:04 +08001245{
1246 const int op = bio_op(bio);
1247 const unsigned long do_sync = (bio->bi_opf & REQ_SYNC);
1248 const unsigned long do_fua = (bio->bi_opf & REQ_FUA);
1249 unsigned long flags;
1250 struct blk_plug_cb *cb;
1251 struct raid10_plug_cb *plug = NULL;
1252 struct r10conf *conf = mddev->private;
1253 struct md_rdev *rdev;
1254 int devnum = r10_bio->devs[n_copy].devnum;
1255 struct bio *mbio;
1256
1257 if (replacement) {
1258 rdev = conf->mirrors[devnum].replacement;
1259 if (rdev == NULL) {
1260 /* Replacement just got moved to main 'rdev' */
1261 smp_mb();
1262 rdev = conf->mirrors[devnum].rdev;
1263 }
1264 } else
1265 rdev = conf->mirrors[devnum].rdev;
1266
Kent Overstreetafeee512018-05-20 18:25:52 -04001267 mbio = bio_clone_fast(bio, GFP_NOIO, &mddev->bio_set);
Guoqing Jiang27f26a02017-03-20 17:46:04 +08001268 if (replacement)
1269 r10_bio->devs[n_copy].repl_bio = mbio;
1270 else
1271 r10_bio->devs[n_copy].bio = mbio;
1272
1273 mbio->bi_iter.bi_sector = (r10_bio->devs[n_copy].addr +
1274 choose_data_offset(r10_bio, rdev));
Christoph Hellwig74d46992017-08-23 19:10:32 +02001275 bio_set_dev(mbio, rdev->bdev);
Guoqing Jiang27f26a02017-03-20 17:46:04 +08001276 mbio->bi_end_io = raid10_end_write_request;
1277 bio_set_op_attrs(mbio, op, do_sync | do_fua);
1278 if (!replacement && test_bit(FailFast,
1279 &conf->mirrors[devnum].rdev->flags)
1280 && enough(conf, devnum))
1281 mbio->bi_opf |= MD_FAILFAST;
1282 mbio->bi_private = r10_bio;
1283
1284 if (conf->mddev->gendisk)
Christoph Hellwig74d46992017-08-23 19:10:32 +02001285 trace_block_bio_remap(mbio->bi_disk->queue,
Guoqing Jiang27f26a02017-03-20 17:46:04 +08001286 mbio, disk_devt(conf->mddev->gendisk),
1287 r10_bio->sector);
1288 /* flush_pending_writes() needs access to the rdev so...*/
Christoph Hellwig74d46992017-08-23 19:10:32 +02001289 mbio->bi_disk = (void *)rdev;
Guoqing Jiang27f26a02017-03-20 17:46:04 +08001290
1291 atomic_inc(&r10_bio->remaining);
1292
1293 cb = blk_check_plugged(raid10_unplug, mddev, sizeof(*plug));
1294 if (cb)
1295 plug = container_of(cb, struct raid10_plug_cb, cb);
1296 else
1297 plug = NULL;
Guoqing Jiang27f26a02017-03-20 17:46:04 +08001298 if (plug) {
1299 bio_list_add(&plug->pending, mbio);
1300 plug->pending_cnt++;
1301 } else {
Shaohua Li23b245c2017-05-10 08:47:11 -07001302 spin_lock_irqsave(&conf->device_lock, flags);
Guoqing Jiang27f26a02017-03-20 17:46:04 +08001303 bio_list_add(&conf->pending_bio_list, mbio);
1304 conf->pending_count++;
Shaohua Li23b245c2017-05-10 08:47:11 -07001305 spin_unlock_irqrestore(&conf->device_lock, flags);
Guoqing Jiang27f26a02017-03-20 17:46:04 +08001306 md_wakeup_thread(mddev->thread);
Shaohua Li23b245c2017-05-10 08:47:11 -07001307 }
Guoqing Jiang27f26a02017-03-20 17:46:04 +08001308}
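/*
 * Rough picture of the batching above (illustrative): a near=2 layout turns
 * every write into two mbios.  If the submitter holds a blk_plug they are
 * queued on plug->pending and submitted together from raid10_unplug();
 * otherwise they join conf->pending_bio_list for the raid10d thread to
 * flush.
 */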
1309
Robert LeBlancbb5f1ed2016-12-05 13:02:58 -07001310static void raid10_write_request(struct mddev *mddev, struct bio *bio,
1311 struct r10bio *r10_bio)
1312{
1313 struct r10conf *conf = mddev->private;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001314 int i;
NeilBrown3cb03002011-10-11 16:45:26 +11001315 struct md_rdev *blocked_rdev;
Robert LeBlancbb5f1ed2016-12-05 13:02:58 -07001316 sector_t sectors;
NeilBrownd4432c22011-07-28 11:39:24 +10001317 int max_sectors;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001318
Guoqing Jiangcb8a7a72017-10-24 15:11:51 +08001319 if ((mddev_is_clustered(mddev) &&
1320 md_cluster_ops->area_resyncing(mddev, WRITE,
1321 bio->bi_iter.bi_sector,
1322 bio_end_sector(bio)))) {
1323 DEFINE_WAIT(w);
1324 for (;;) {
1325 prepare_to_wait(&conf->wait_barrier,
1326 &w, TASK_IDLE);
1327 if (!md_cluster_ops->area_resyncing(mddev, WRITE,
1328 bio->bi_iter.bi_sector, bio_end_sector(bio)))
1329 break;
1330 schedule();
1331 }
1332 finish_wait(&conf->wait_barrier, &w);
1333 }
1334
NeilBrownfc9977d2017-04-05 14:05:51 +10001335 sectors = r10_bio->sectors;
Guoqing Jiangcaea3c42018-12-07 18:24:21 +08001336 regular_request_wait(mddev, conf, bio, sectors);
NeilBrown3ea7daa2012-05-22 13:53:47 +10001337 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
NeilBrown3ea7daa2012-05-22 13:53:47 +10001338 (mddev->reshape_backwards
Kent Overstreet4f024f32013-10-11 15:44:27 -07001339 ? (bio->bi_iter.bi_sector < conf->reshape_safe &&
1340 bio->bi_iter.bi_sector + sectors > conf->reshape_progress)
1341 : (bio->bi_iter.bi_sector + sectors > conf->reshape_safe &&
1342 bio->bi_iter.bi_sector < conf->reshape_progress))) {
NeilBrown3ea7daa2012-05-22 13:53:47 +10001343 /* Need to update reshape_position in metadata */
1344 mddev->reshape_position = conf->reshape_progress;
Shaohua Li29530792016-12-08 15:48:19 -08001345 set_mask_bits(&mddev->sb_flags, 0,
1346 BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING));
NeilBrown3ea7daa2012-05-22 13:53:47 +10001347 md_wakeup_thread(mddev->thread);
NeilBrown578b54a2016-11-14 16:30:21 +11001348 raid10_log(conf->mddev, "wait reshape metadata");
NeilBrown3ea7daa2012-05-22 13:53:47 +10001349 wait_event(mddev->sb_wait,
Shaohua Li29530792016-12-08 15:48:19 -08001350 !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
NeilBrown3ea7daa2012-05-22 13:53:47 +10001351
1352 conf->reshape_safe = mddev->reshape_position;
1353 }
1354
NeilBrown34db0cd2011-10-11 16:50:01 +11001355 if (conf->pending_count >= max_queued_requests) {
1356 md_wakeup_thread(mddev->thread);
NeilBrown578b54a2016-11-14 16:30:21 +11001357 raid10_log(mddev, "wait queued");
NeilBrown34db0cd2011-10-11 16:50:01 +11001358 wait_event(conf->wait_barrier,
1359 conf->pending_count < max_queued_requests);
1360 }
Dan Williams6bfe0b42008-04-30 00:52:32 -07001361 /* first select target devices under rcu_lock and
Linus Torvalds1da177e2005-04-16 15:20:36 -07001362 * inc refcount on their rdev. Record them by setting
1363 * bios[x] to bio
NeilBrownd4432c22011-07-28 11:39:24 +10001364 * If there are known/acknowledged bad blocks on any device
1365 * on which we have seen a write error, we want to avoid
1366 * writing to those blocks. This potentially requires several
1367 * writes to write around the bad blocks. Each set of writes
NeilBrownfd16f2e2017-03-15 14:05:13 +11001368 * gets its own r10_bio with a set of bios attached.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001369 */
NeilBrownc3b328a2011-04-18 18:25:43 +10001370
NeilBrown69335ef2011-12-23 10:17:54 +11001371 r10_bio->read_slot = -1; /* make sure repl_bio gets freed */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001372 raid10_find_phys(conf, r10_bio);
NeilBrownd4432c22011-07-28 11:39:24 +10001373retry_write:
Harvey Harrisoncb6969e2008-05-06 20:42:32 -07001374 blocked_rdev = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001375 rcu_read_lock();
NeilBrownd4432c22011-07-28 11:39:24 +10001376 max_sectors = r10_bio->sectors;
1377
Linus Torvalds1da177e2005-04-16 15:20:36 -07001378 for (i = 0; i < conf->copies; i++) {
1379 int d = r10_bio->devs[i].devnum;
NeilBrown3cb03002011-10-11 16:45:26 +11001380 struct md_rdev *rdev = rcu_dereference(conf->mirrors[d].rdev);
NeilBrown475b0322011-12-23 10:17:55 +11001381 struct md_rdev *rrdev = rcu_dereference(
1382 conf->mirrors[d].replacement);
NeilBrown4ca40c22011-12-23 10:17:55 +11001383 if (rdev == rrdev)
1384 rrdev = NULL;
Dan Williams6bfe0b42008-04-30 00:52:32 -07001385 if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
1386 atomic_inc(&rdev->nr_pending);
1387 blocked_rdev = rdev;
1388 break;
1389 }
NeilBrown475b0322011-12-23 10:17:55 +11001390 if (rrdev && unlikely(test_bit(Blocked, &rrdev->flags))) {
1391 atomic_inc(&rrdev->nr_pending);
1392 blocked_rdev = rrdev;
1393 break;
1394 }
Kent Overstreet8ae12662015-04-27 23:48:34 -07001395 if (rdev && (test_bit(Faulty, &rdev->flags)))
NeilBrowne7c0c3f2012-11-22 14:42:49 +11001396 rdev = NULL;
Kent Overstreet8ae12662015-04-27 23:48:34 -07001397 if (rrdev && (test_bit(Faulty, &rrdev->flags)))
NeilBrown475b0322011-12-23 10:17:55 +11001398 rrdev = NULL;
1399
NeilBrownd4432c22011-07-28 11:39:24 +10001400 r10_bio->devs[i].bio = NULL;
NeilBrown475b0322011-12-23 10:17:55 +11001401 r10_bio->devs[i].repl_bio = NULL;
NeilBrowne7c0c3f2012-11-22 14:42:49 +11001402
1403 if (!rdev && !rrdev) {
NeilBrown6cce3b22006-01-06 00:20:16 -08001404 set_bit(R10BIO_Degraded, &r10_bio->state);
NeilBrownd4432c22011-07-28 11:39:24 +10001405 continue;
NeilBrown6cce3b22006-01-06 00:20:16 -08001406 }
NeilBrowne7c0c3f2012-11-22 14:42:49 +11001407 if (rdev && test_bit(WriteErrorSeen, &rdev->flags)) {
NeilBrownd4432c22011-07-28 11:39:24 +10001408 sector_t first_bad;
1409 sector_t dev_sector = r10_bio->devs[i].addr;
1410 int bad_sectors;
1411 int is_bad;
1412
Robert LeBlancbb5f1ed2016-12-05 13:02:58 -07001413 is_bad = is_badblock(rdev, dev_sector, max_sectors,
NeilBrownd4432c22011-07-28 11:39:24 +10001414 &first_bad, &bad_sectors);
1415 if (is_bad < 0) {
1416 /* Mustn't write here until the bad block
1417 * is acknowledged
1418 */
1419 atomic_inc(&rdev->nr_pending);
1420 set_bit(BlockedBadBlocks, &rdev->flags);
1421 blocked_rdev = rdev;
1422 break;
1423 }
1424 if (is_bad && first_bad <= dev_sector) {
1425 /* Cannot write here at all */
1426 bad_sectors -= (dev_sector - first_bad);
1427 if (bad_sectors < max_sectors)
1428 /* Mustn't write more than bad_sectors
1429 * to other devices yet
1430 */
1431 max_sectors = bad_sectors;
1432 /* We don't set R10BIO_Degraded as that
1433 * only applies if the disk is missing,
1434 * so it might be re-added, and we want to
1435 * know to recover this chunk.
1436 * In this case the device is here, and the
1437 * fact that this chunk is not in-sync is
1438 * recorded in the bad block log.
1439 */
1440 continue;
1441 }
1442 if (is_bad) {
1443 int good_sectors = first_bad - dev_sector;
1444 if (good_sectors < max_sectors)
1445 max_sectors = good_sectors;
1446 }
1447 }
NeilBrowne7c0c3f2012-11-22 14:42:49 +11001448 if (rdev) {
1449 r10_bio->devs[i].bio = bio;
1450 atomic_inc(&rdev->nr_pending);
1451 }
NeilBrown475b0322011-12-23 10:17:55 +11001452 if (rrdev) {
1453 r10_bio->devs[i].repl_bio = bio;
1454 atomic_inc(&rrdev->nr_pending);
1455 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001456 }
1457 rcu_read_unlock();
1458
Dan Williams6bfe0b42008-04-30 00:52:32 -07001459 if (unlikely(blocked_rdev)) {
1460 /* Have to wait for this device to get unblocked, then retry */
1461 int j;
1462 int d;
1463
NeilBrown475b0322011-12-23 10:17:55 +11001464 for (j = 0; j < i; j++) {
Dan Williams6bfe0b42008-04-30 00:52:32 -07001465 if (r10_bio->devs[j].bio) {
1466 d = r10_bio->devs[j].devnum;
1467 rdev_dec_pending(conf->mirrors[d].rdev, mddev);
1468 }
NeilBrown475b0322011-12-23 10:17:55 +11001469 if (r10_bio->devs[j].repl_bio) {
NeilBrown4ca40c22011-12-23 10:17:55 +11001470 struct md_rdev *rdev;
NeilBrown475b0322011-12-23 10:17:55 +11001471 d = r10_bio->devs[j].devnum;
NeilBrown4ca40c22011-12-23 10:17:55 +11001472 rdev = conf->mirrors[d].replacement;
1473 if (!rdev) {
1474 /* Race with remove_disk */
1475 smp_mb();
1476 rdev = conf->mirrors[d].rdev;
1477 }
1478 rdev_dec_pending(rdev, mddev);
NeilBrown475b0322011-12-23 10:17:55 +11001479 }
1480 }
Dan Williams6bfe0b42008-04-30 00:52:32 -07001481 allow_barrier(conf);
NeilBrown578b54a2016-11-14 16:30:21 +11001482 raid10_log(conf->mddev, "wait rdev %d blocked", blocked_rdev->raid_disk);
Dan Williams6bfe0b42008-04-30 00:52:32 -07001483 md_wait_for_blocked_rdev(blocked_rdev, mddev);
1484 wait_barrier(conf);
1485 goto retry_write;
1486 }
1487
NeilBrown6b6c8112017-03-15 14:05:13 +11001488 if (max_sectors < r10_bio->sectors)
NeilBrownd4432c22011-07-28 11:39:24 +10001489 r10_bio->sectors = max_sectors;
NeilBrownfc9977d2017-04-05 14:05:51 +10001490
1491 if (r10_bio->sectors < bio_sectors(bio)) {
1492 struct bio *split = bio_split(bio, r10_bio->sectors,
Kent Overstreetafeee512018-05-20 18:25:52 -04001493 GFP_NOIO, &conf->bio_split);
NeilBrownfc9977d2017-04-05 14:05:51 +10001494 bio_chain(split, bio);
1495 generic_make_request(bio);
1496 bio = split;
1497 r10_bio->master_bio = bio;
NeilBrownd4432c22011-07-28 11:39:24 +10001498 }
NeilBrownd4432c22011-07-28 11:39:24 +10001499
NeilBrown4e780642010-10-19 12:54:01 +11001500 atomic_set(&r10_bio->remaining, 1);
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001501 md_bitmap_startwrite(mddev->bitmap, r10_bio->sector, r10_bio->sectors, 0);
NeilBrown06d91a52005-06-21 17:17:12 -07001502
Linus Torvalds1da177e2005-04-16 15:20:36 -07001503 for (i = 0; i < conf->copies; i++) {
Guoqing Jiang27f26a02017-03-20 17:46:04 +08001504 if (r10_bio->devs[i].bio)
NeilBrownfc9977d2017-04-05 14:05:51 +10001505 raid10_write_one_disk(mddev, r10_bio, bio, false, i);
Guoqing Jiang27f26a02017-03-20 17:46:04 +08001506 if (r10_bio->devs[i].repl_bio)
NeilBrownfc9977d2017-04-05 14:05:51 +10001507 raid10_write_one_disk(mddev, r10_bio, bio, true, i);
NeilBrownd4432c22011-07-28 11:39:24 +10001508 }
NeilBrown079fa162011-09-10 17:21:23 +10001509 one_write_done(r10_bio);
Kent Overstreet20d01892013-11-23 18:21:01 -08001510}
1511
NeilBrownfc9977d2017-04-05 14:05:51 +10001512static void __make_request(struct mddev *mddev, struct bio *bio, int sectors)
Robert LeBlancbb5f1ed2016-12-05 13:02:58 -07001513{
1514 struct r10conf *conf = mddev->private;
1515 struct r10bio *r10_bio;
1516
Kent Overstreetafeee512018-05-20 18:25:52 -04001517 r10_bio = mempool_alloc(&conf->r10bio_pool, GFP_NOIO);
Robert LeBlancbb5f1ed2016-12-05 13:02:58 -07001518
1519 r10_bio->master_bio = bio;
NeilBrownfc9977d2017-04-05 14:05:51 +10001520 r10_bio->sectors = sectors;
Robert LeBlancbb5f1ed2016-12-05 13:02:58 -07001521
1522 r10_bio->mddev = mddev;
1523 r10_bio->sector = bio->bi_iter.bi_sector;
1524 r10_bio->state = 0;
NeilBrown545250f2017-04-05 14:05:51 +10001525 memset(r10_bio->devs, 0, sizeof(r10_bio->devs[0]) * conf->copies);
Robert LeBlancbb5f1ed2016-12-05 13:02:58 -07001526
1527 if (bio_data_dir(bio) == READ)
1528 raid10_read_request(mddev, bio, r10_bio);
1529 else
1530 raid10_write_request(mddev, bio, r10_bio);
1531}
1532
NeilBrowncc27b0c2017-06-05 16:49:39 +10001533static bool raid10_make_request(struct mddev *mddev, struct bio *bio)
Kent Overstreet20d01892013-11-23 18:21:01 -08001534{
1535 struct r10conf *conf = mddev->private;
1536 sector_t chunk_mask = (conf->geo.chunk_mask & conf->prev.chunk_mask);
1537 int chunk_sects = chunk_mask + 1;
NeilBrownfc9977d2017-04-05 14:05:51 +10001538 int sectors = bio_sectors(bio);
Kent Overstreet20d01892013-11-23 18:21:01 -08001539
Jens Axboe1eff9d32016-08-05 15:35:16 -06001540 if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
Kent Overstreet20d01892013-11-23 18:21:01 -08001541 md_flush_request(mddev, bio);
NeilBrowncc27b0c2017-06-05 16:49:39 +10001542 return true;
Kent Overstreet20d01892013-11-23 18:21:01 -08001543 }
1544
NeilBrowncc27b0c2017-06-05 16:49:39 +10001545 if (!md_write_start(mddev, bio))
1546 return false;
1547
NeilBrownfc9977d2017-04-05 14:05:51 +10001548 /*
1549 * If this request crosses a chunk boundary, we need to split
1550 * it.
1551 */
1552 if (unlikely((bio->bi_iter.bi_sector & chunk_mask) +
1553 sectors > chunk_sects
1554 && (conf->geo.near_copies < conf->geo.raid_disks
1555 || conf->prev.near_copies <
1556 conf->prev.raid_disks)))
1557 sectors = chunk_sects -
1558 (bio->bi_iter.bi_sector &
1559 (chunk_sects - 1));
1560 __make_request(mddev, bio, sectors);
NeilBrown079fa162011-09-10 17:21:23 +10001561
1562 /* In case raid10d snuck in to freeze_array */
1563 wake_up(&conf->wait_barrier);
NeilBrowncc27b0c2017-06-05 16:49:39 +10001564 return true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001565}
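/*
 * Worked example for the chunk-boundary clamp above (illustrative sizes,
 * and only relevant when near_copies < raid_disks): with 512 KiB chunks,
 * chunk_sects == 1024 and chunk_mask == 1023.  A 256-sector bio starting at
 * sector 900 has (900 & 1023) + 256 == 1156 > 1024, so it is clamped to
 * 1024 - 900 == 124 sectors; __make_request() then splits off that head and
 * the tail re-enters this function for the next chunk.
 */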
1566
Shaohua Li849674e2016-01-20 13:52:20 -08001567static void raid10_status(struct seq_file *seq, struct mddev *mddev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001568{
NeilBrowne879a872011-10-11 16:49:02 +11001569 struct r10conf *conf = mddev->private;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001570 int i;
1571
NeilBrown5cf00fc2012-05-21 09:28:20 +10001572 if (conf->geo.near_copies < conf->geo.raid_disks)
Andre Noll9d8f0362009-06-18 08:45:01 +10001573 seq_printf(seq, " %dK chunks", mddev->chunk_sectors / 2);
NeilBrown5cf00fc2012-05-21 09:28:20 +10001574 if (conf->geo.near_copies > 1)
1575 seq_printf(seq, " %d near-copies", conf->geo.near_copies);
1576 if (conf->geo.far_copies > 1) {
1577 if (conf->geo.far_offset)
1578 seq_printf(seq, " %d offset-copies", conf->geo.far_copies);
NeilBrownc93983b2006-06-26 00:27:41 -07001579 else
NeilBrown5cf00fc2012-05-21 09:28:20 +10001580 seq_printf(seq, " %d far-copies", conf->geo.far_copies);
NeilBrown8bce6d32015-10-22 13:20:15 +11001581 if (conf->geo.far_set_size != conf->geo.raid_disks)
1582 seq_printf(seq, " %d devices per set", conf->geo.far_set_size);
NeilBrownc93983b2006-06-26 00:27:41 -07001583 }
NeilBrown5cf00fc2012-05-21 09:28:20 +10001584 seq_printf(seq, " [%d/%d] [", conf->geo.raid_disks,
1585 conf->geo.raid_disks - mddev->degraded);
NeilBrownd44b0a92016-06-02 16:19:52 +10001586 rcu_read_lock();
1587 for (i = 0; i < conf->geo.raid_disks; i++) {
1588 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
1589 seq_printf(seq, "%s", rdev && test_bit(In_sync, &rdev->flags) ? "U" : "_");
1590 }
1591 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001592 seq_printf(seq, "]");
1593}
1594
NeilBrown700c7212011-07-27 11:00:36 +10001595/* check if there are enough drives for
 1596 * every block to appear on at least one.
1597 * Don't consider the device numbered 'ignore'
1598 * as we might be about to remove it.
1599 */
NeilBrown635f6412013-06-11 14:57:09 +10001600static int _enough(struct r10conf *conf, int previous, int ignore)
NeilBrown700c7212011-07-27 11:00:36 +10001601{
1602 int first = 0;
NeilBrown725d6e52013-06-11 15:08:03 +10001603 int has_enough = 0;
NeilBrown635f6412013-06-11 14:57:09 +10001604 int disks, ncopies;
1605 if (previous) {
1606 disks = conf->prev.raid_disks;
1607 ncopies = conf->prev.near_copies;
1608 } else {
1609 disks = conf->geo.raid_disks;
1610 ncopies = conf->geo.near_copies;
1611 }
NeilBrown700c7212011-07-27 11:00:36 +10001612
NeilBrown725d6e52013-06-11 15:08:03 +10001613 rcu_read_lock();
NeilBrown700c7212011-07-27 11:00:36 +10001614 do {
1615 int n = conf->copies;
1616 int cnt = 0;
NeilBrown80b48122012-09-27 12:35:21 +10001617 int this = first;
NeilBrown700c7212011-07-27 11:00:36 +10001618 while (n--) {
NeilBrown725d6e52013-06-11 15:08:03 +10001619 struct md_rdev *rdev;
1620 if (this != ignore &&
1621 (rdev = rcu_dereference(conf->mirrors[this].rdev)) &&
1622 test_bit(In_sync, &rdev->flags))
NeilBrown700c7212011-07-27 11:00:36 +10001623 cnt++;
NeilBrown635f6412013-06-11 14:57:09 +10001624 this = (this+1) % disks;
NeilBrown700c7212011-07-27 11:00:36 +10001625 }
1626 if (cnt == 0)
NeilBrown725d6e52013-06-11 15:08:03 +10001627 goto out;
NeilBrown635f6412013-06-11 14:57:09 +10001628 first = (first + ncopies) % disks;
NeilBrown700c7212011-07-27 11:00:36 +10001629 } while (first != 0);
NeilBrown725d6e52013-06-11 15:08:03 +10001630 has_enough = 1;
1631out:
1632 rcu_read_unlock();
1633 return has_enough;
NeilBrown700c7212011-07-27 11:00:36 +10001634}
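/*
 * Worked example (illustrative): a 4-disk near=2, far=1 array keeps copies
 * on disk pairs {0,1} and {2,3}, so the loop above inspects exactly those
 * two sets.  With disk 1 failed, pair {0,1} still has one In_sync member
 * and _enough() returns 1; asked to 'ignore' disk 0 while disk 1 is already
 * out, that pair counts zero In_sync members and it returns 0.
 */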
1635
NeilBrownf8c9e742012-05-21 09:28:33 +10001636static int enough(struct r10conf *conf, int ignore)
1637{
NeilBrown635f6412013-06-11 14:57:09 +10001638 /* when calling 'enough', both 'prev' and 'geo' must
1639 * be stable.
1640 * This is ensured if ->reconfig_mutex or ->device_lock
1641 * is held.
1642 */
1643 return _enough(conf, 0, ignore) &&
1644 _enough(conf, 1, ignore);
NeilBrownf8c9e742012-05-21 09:28:33 +10001645}
1646
Shaohua Li849674e2016-01-20 13:52:20 -08001647static void raid10_error(struct mddev *mddev, struct md_rdev *rdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001648{
1649 char b[BDEVNAME_SIZE];
NeilBrowne879a872011-10-11 16:49:02 +11001650 struct r10conf *conf = mddev->private;
NeilBrown635f6412013-06-11 14:57:09 +10001651 unsigned long flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001652
1653 /*
1654 * If it is not operational, then we have already marked it as dead
 1655 * else if it is the last working disk, ignore the error, let the
1656 * next level up know.
1657 * else mark the drive as failed
1658 */
NeilBrown635f6412013-06-11 14:57:09 +10001659 spin_lock_irqsave(&conf->device_lock, flags);
NeilBrownb2d444d2005-11-08 21:39:31 -08001660 if (test_bit(In_sync, &rdev->flags)
NeilBrown635f6412013-06-11 14:57:09 +10001661 && !enough(conf, rdev->raid_disk)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001662 /*
1663 * Don't fail the drive, just return an IO error.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001664 */
NeilBrownc04be0a2006-10-03 01:15:53 -07001665 spin_unlock_irqrestore(&conf->device_lock, flags);
NeilBrown635f6412013-06-11 14:57:09 +10001666 return;
1667 }
NeilBrown2446dba2014-07-31 10:16:29 +10001668 if (test_and_clear_bit(In_sync, &rdev->flags))
NeilBrown635f6412013-06-11 14:57:09 +10001669 mddev->degraded++;
NeilBrown2446dba2014-07-31 10:16:29 +10001670 /*
1671 * If recovery is running, make sure it aborts.
1672 */
1673 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
NeilBrownde393cd2011-07-28 11:31:48 +10001674 set_bit(Blocked, &rdev->flags);
NeilBrownb2d444d2005-11-08 21:39:31 -08001675 set_bit(Faulty, &rdev->flags);
Shaohua Li29530792016-12-08 15:48:19 -08001676 set_mask_bits(&mddev->sb_flags, 0,
1677 BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING));
NeilBrown635f6412013-06-11 14:57:09 +10001678 spin_unlock_irqrestore(&conf->device_lock, flags);
NeilBrown08464e02016-11-02 14:16:50 +11001679 pr_crit("md/raid10:%s: Disk failure on %s, disabling device.\n"
1680 "md/raid10:%s: Operation continuing on %d devices.\n",
1681 mdname(mddev), bdevname(rdev->bdev, b),
1682 mdname(mddev), conf->geo.raid_disks - mddev->degraded);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001683}
1684
NeilBrowne879a872011-10-11 16:49:02 +11001685static void print_conf(struct r10conf *conf)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001686{
1687 int i;
NeilBrown4056ca52016-06-02 16:19:52 +10001688 struct md_rdev *rdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001689
NeilBrown08464e02016-11-02 14:16:50 +11001690 pr_debug("RAID10 conf printout:\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001691 if (!conf) {
NeilBrown08464e02016-11-02 14:16:50 +11001692 pr_debug("(!conf)\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001693 return;
1694 }
NeilBrown08464e02016-11-02 14:16:50 +11001695 pr_debug(" --- wd:%d rd:%d\n", conf->geo.raid_disks - conf->mddev->degraded,
1696 conf->geo.raid_disks);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001697
NeilBrown4056ca52016-06-02 16:19:52 +10001698	/* This is only called with ->reconfig_mutex held, so
1699 * rcu protection of rdev is not needed */
NeilBrown5cf00fc2012-05-21 09:28:20 +10001700 for (i = 0; i < conf->geo.raid_disks; i++) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001701 char b[BDEVNAME_SIZE];
NeilBrown4056ca52016-06-02 16:19:52 +10001702 rdev = conf->mirrors[i].rdev;
1703 if (rdev)
NeilBrown08464e02016-11-02 14:16:50 +11001704 pr_debug(" disk %d, wo:%d, o:%d, dev:%s\n",
1705 i, !test_bit(In_sync, &rdev->flags),
1706 !test_bit(Faulty, &rdev->flags),
1707 bdevname(rdev->bdev,b));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001708 }
1709}
1710
NeilBrowne879a872011-10-11 16:49:02 +11001711static void close_sync(struct r10conf *conf)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001712{
NeilBrown0a27ec92006-01-06 00:20:13 -08001713 wait_barrier(conf);
1714 allow_barrier(conf);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001715
Kent Overstreetafeee512018-05-20 18:25:52 -04001716 mempool_exit(&conf->r10buf_pool);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001717}
1718
NeilBrownfd01b882011-10-11 16:47:53 +11001719static int raid10_spare_active(struct mddev *mddev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001720{
1721 int i;
NeilBrowne879a872011-10-11 16:49:02 +11001722 struct r10conf *conf = mddev->private;
Jonathan Brassowdc280d982012-07-31 10:03:52 +10001723 struct raid10_info *tmp;
NeilBrown6b965622010-08-18 11:56:59 +10001724 int count = 0;
1725 unsigned long flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001726
1727 /*
1728 * Find all non-in_sync disks within the RAID10 configuration
1729 * and mark them in_sync
1730 */
NeilBrown5cf00fc2012-05-21 09:28:20 +10001731 for (i = 0; i < conf->geo.raid_disks; i++) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001732 tmp = conf->mirrors + i;
NeilBrown4ca40c22011-12-23 10:17:55 +11001733 if (tmp->replacement
1734 && tmp->replacement->recovery_offset == MaxSector
1735 && !test_bit(Faulty, &tmp->replacement->flags)
1736 && !test_and_set_bit(In_sync, &tmp->replacement->flags)) {
1737 /* Replacement has just become active */
1738 if (!tmp->rdev
1739 || !test_and_clear_bit(In_sync, &tmp->rdev->flags))
1740 count++;
1741 if (tmp->rdev) {
1742 /* Replaced device not technically faulty,
1743 * but we need to be sure it gets removed
1744 * and never re-added.
1745 */
1746 set_bit(Faulty, &tmp->rdev->flags);
1747 sysfs_notify_dirent_safe(
1748 tmp->rdev->sysfs_state);
1749 }
1750 sysfs_notify_dirent_safe(tmp->replacement->sysfs_state);
1751 } else if (tmp->rdev
Lukasz Dorau61e49472013-10-24 12:55:17 +11001752 && tmp->rdev->recovery_offset == MaxSector
NeilBrown4ca40c22011-12-23 10:17:55 +11001753 && !test_bit(Faulty, &tmp->rdev->flags)
1754 && !test_and_set_bit(In_sync, &tmp->rdev->flags)) {
NeilBrown6b965622010-08-18 11:56:59 +10001755 count++;
Jonathan Brassow2863b9e2012-10-11 13:38:58 +11001756 sysfs_notify_dirent_safe(tmp->rdev->sysfs_state);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001757 }
1758 }
NeilBrown6b965622010-08-18 11:56:59 +10001759 spin_lock_irqsave(&conf->device_lock, flags);
1760 mddev->degraded -= count;
1761 spin_unlock_irqrestore(&conf->device_lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001762
1763 print_conf(conf);
NeilBrown6b965622010-08-18 11:56:59 +10001764 return count;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001765}
1766
NeilBrownfd01b882011-10-11 16:47:53 +11001767static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001768{
NeilBrowne879a872011-10-11 16:49:02 +11001769 struct r10conf *conf = mddev->private;
Neil Brown199050e2008-06-28 08:31:33 +10001770 int err = -EEXIST;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001771 int mirror;
Neil Brown6c2fce22008-06-28 08:31:31 +10001772 int first = 0;
NeilBrown5cf00fc2012-05-21 09:28:20 +10001773 int last = conf->geo.raid_disks - 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001774
1775 if (mddev->recovery_cp < MaxSector)
1776 /* only hot-add to in-sync arrays, as recovery is
1777 * very different from resync
1778 */
Neil Brown199050e2008-06-28 08:31:33 +10001779 return -EBUSY;
NeilBrown635f6412013-06-11 14:57:09 +10001780 if (rdev->saved_raid_disk < 0 && !_enough(conf, 1, -1))
Neil Brown199050e2008-06-28 08:31:33 +10001781 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001782
Dan Williams1501efa2016-01-13 16:00:07 -08001783 if (md_integrity_add_rdev(rdev, mddev))
1784 return -ENXIO;
1785
NeilBrowna53a6c82008-11-06 17:28:20 +11001786 if (rdev->raid_disk >= 0)
Neil Brown6c2fce22008-06-28 08:31:31 +10001787 first = last = rdev->raid_disk;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001788
Namhyung Kim2c4193d2011-07-18 17:38:43 +10001789 if (rdev->saved_raid_disk >= first &&
Shaohua Li9e753ba2018-10-14 17:05:07 -07001790 rdev->saved_raid_disk < conf->geo.raid_disks &&
NeilBrown6cce3b22006-01-06 00:20:16 -08001791 conf->mirrors[rdev->saved_raid_disk].rdev == NULL)
1792 mirror = rdev->saved_raid_disk;
1793 else
Neil Brown6c2fce22008-06-28 08:31:31 +10001794 mirror = first;
NeilBrown2bb77732011-07-27 11:00:36 +10001795 for ( ; mirror <= last ; mirror++) {
Jonathan Brassowdc280d982012-07-31 10:03:52 +10001796 struct raid10_info *p = &conf->mirrors[mirror];
NeilBrown2bb77732011-07-27 11:00:36 +10001797 if (p->recovery_disabled == mddev->recovery_disabled)
1798 continue;
NeilBrownb7044d42011-12-23 10:17:56 +11001799 if (p->rdev) {
1800 if (!test_bit(WantReplacement, &p->rdev->flags) ||
1801 p->replacement != NULL)
1802 continue;
1803 clear_bit(In_sync, &rdev->flags);
1804 set_bit(Replacement, &rdev->flags);
1805 rdev->raid_disk = mirror;
1806 err = 0;
Jonathan Brassow9092c022013-05-02 14:19:24 -05001807 if (mddev->gendisk)
1808 disk_stack_limits(mddev->gendisk, rdev->bdev,
1809 rdev->data_offset << 9);
NeilBrownb7044d42011-12-23 10:17:56 +11001810 conf->fullsync = 1;
1811 rcu_assign_pointer(p->replacement, rdev);
1812 break;
1813 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001814
Jonathan Brassow9092c022013-05-02 14:19:24 -05001815 if (mddev->gendisk)
1816 disk_stack_limits(mddev->gendisk, rdev->bdev,
1817 rdev->data_offset << 9);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001818
NeilBrown2bb77732011-07-27 11:00:36 +10001819 p->head_position = 0;
NeilBrownd890fa22011-10-26 11:54:39 +11001820 p->recovery_disabled = mddev->recovery_disabled - 1;
NeilBrown2bb77732011-07-27 11:00:36 +10001821 rdev->raid_disk = mirror;
1822 err = 0;
1823 if (rdev->saved_raid_disk != mirror)
1824 conf->fullsync = 1;
1825 rcu_assign_pointer(p->rdev, rdev);
1826 break;
1827 }
Jonathan Brassowed30be02012-10-31 11:42:30 +11001828 if (mddev->queue && blk_queue_discard(bdev_get_queue(rdev->bdev)))
Bart Van Assche8b904b52018-03-07 17:10:10 -08001829 blk_queue_flag_set(QUEUE_FLAG_DISCARD, mddev->queue);
Shaohua Li532a2a32012-10-11 13:30:52 +11001830
Linus Torvalds1da177e2005-04-16 15:20:36 -07001831 print_conf(conf);
Neil Brown199050e2008-06-28 08:31:33 +10001832 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001833}
1834
NeilBrownb8321b62011-12-23 10:17:51 +11001835static int raid10_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001836{
NeilBrowne879a872011-10-11 16:49:02 +11001837 struct r10conf *conf = mddev->private;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001838 int err = 0;
NeilBrownb8321b62011-12-23 10:17:51 +11001839 int number = rdev->raid_disk;
NeilBrownc8ab9032011-12-23 10:17:54 +11001840 struct md_rdev **rdevp;
Jonathan Brassowdc280d982012-07-31 10:03:52 +10001841 struct raid10_info *p = conf->mirrors + number;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001842
1843 print_conf(conf);
NeilBrownc8ab9032011-12-23 10:17:54 +11001844 if (rdev == p->rdev)
1845 rdevp = &p->rdev;
1846 else if (rdev == p->replacement)
1847 rdevp = &p->replacement;
1848 else
1849 return 0;
1850
1851 if (test_bit(In_sync, &rdev->flags) ||
1852 atomic_read(&rdev->nr_pending)) {
1853 err = -EBUSY;
1854 goto abort;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001855 }
NeilBrownd787be42016-06-02 16:19:53 +10001856 /* Only remove non-faulty devices if recovery
NeilBrownc8ab9032011-12-23 10:17:54 +11001857 * is not possible.
1858 */
1859 if (!test_bit(Faulty, &rdev->flags) &&
1860 mddev->recovery_disabled != p->recovery_disabled &&
NeilBrown4ca40c22011-12-23 10:17:55 +11001861 (!p->replacement || p->replacement == rdev) &&
NeilBrown63aced62012-05-22 13:55:33 +10001862 number < conf->geo.raid_disks &&
NeilBrownc8ab9032011-12-23 10:17:54 +11001863 enough(conf, -1)) {
1864 err = -EBUSY;
1865 goto abort;
1866 }
1867 *rdevp = NULL;
NeilBrownd787be42016-06-02 16:19:53 +10001868 if (!test_bit(RemoveSynchronized, &rdev->flags)) {
1869 synchronize_rcu();
1870 if (atomic_read(&rdev->nr_pending)) {
1871 /* lost the race, try later */
1872 err = -EBUSY;
1873 *rdevp = rdev;
1874 goto abort;
1875 }
1876 }
1877 if (p->replacement) {
NeilBrown4ca40c22011-12-23 10:17:55 +11001878 /* We must have just cleared 'rdev' */
1879 p->rdev = p->replacement;
1880 clear_bit(Replacement, &p->replacement->flags);
1881 smp_mb(); /* Make sure other CPUs may see both as identical
1882 * but will never see neither -- if they are careful.
1883 */
1884 p->replacement = NULL;
Guoqing Jiange5bc9c32017-04-24 15:58:04 +08001885 }
NeilBrown4ca40c22011-12-23 10:17:55 +11001886
Guoqing Jiange5bc9c32017-04-24 15:58:04 +08001887 clear_bit(WantReplacement, &rdev->flags);
NeilBrownc8ab9032011-12-23 10:17:54 +11001888 err = md_integrity_register(mddev);
1889
Linus Torvalds1da177e2005-04-16 15:20:36 -07001890abort:
1891
1892 print_conf(conf);
1893 return err;
1894}
1895
Ming Lei81fa1522017-03-17 00:12:32 +08001896static void __end_sync_read(struct r10bio *r10_bio, struct bio *bio, int d)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001897{
NeilBrowne879a872011-10-11 16:49:02 +11001898 struct r10conf *conf = r10_bio->mddev->private;
NeilBrown0eb3ff12006-01-06 00:20:29 -08001899
Christoph Hellwig4e4cbee2017-06-03 09:38:06 +02001900 if (!bio->bi_status)
NeilBrown0eb3ff12006-01-06 00:20:29 -08001901 set_bit(R10BIO_Uptodate, &r10_bio->state);
NeilBrowne684e412011-07-28 11:39:25 +10001902 else
1903 /* The write handler will notice the lack of
1904 * R10BIO_Uptodate and record any errors etc
1905 */
NeilBrown4dbcdc72006-01-06 00:20:52 -08001906 atomic_add(r10_bio->sectors,
1907 &conf->mirrors[d].rdev->corrected_errors);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001908
1909 /* for reconstruct, we always reschedule after a read.
1910 * for resync, only after all reads
1911 */
NeilBrown73d5c382009-02-25 13:18:47 +11001912 rdev_dec_pending(conf->mirrors[d].rdev, conf->mddev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001913 if (test_bit(R10BIO_IsRecover, &r10_bio->state) ||
1914 atomic_dec_and_test(&r10_bio->remaining)) {
1915 /* we have read all the blocks,
1916 * do the comparison in process context in raid10d
1917 */
1918 reschedule_retry(r10_bio);
1919 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001920}
1921
Ming Lei81fa1522017-03-17 00:12:32 +08001922static void end_sync_read(struct bio *bio)
1923{
Ming Leif0250612017-03-17 00:12:33 +08001924 struct r10bio *r10_bio = get_resync_r10bio(bio);
Ming Lei81fa1522017-03-17 00:12:32 +08001925 struct r10conf *conf = r10_bio->mddev->private;
1926 int d = find_bio_disk(conf, r10_bio, bio, NULL, NULL);
1927
1928 __end_sync_read(r10_bio, bio, d);
1929}
1930
1931static void end_reshape_read(struct bio *bio)
1932{
Ming Leif0250612017-03-17 00:12:33 +08001933 /* reshape read bio isn't allocated from r10buf_pool */
Ming Lei81fa1522017-03-17 00:12:32 +08001934 struct r10bio *r10_bio = bio->bi_private;
1935
1936 __end_sync_read(r10_bio, bio, r10_bio->read_slot);
1937}
1938
NeilBrown9f2c9d12011-10-11 16:48:43 +11001939static void end_sync_request(struct r10bio *r10_bio)
NeilBrown5e570282011-07-28 11:39:25 +10001940{
NeilBrownfd01b882011-10-11 16:47:53 +11001941 struct mddev *mddev = r10_bio->mddev;
NeilBrown5e570282011-07-28 11:39:25 +10001942
1943 while (atomic_dec_and_test(&r10_bio->remaining)) {
1944 if (r10_bio->master_bio == NULL) {
1945 /* the primary of several recovery bios */
1946 sector_t s = r10_bio->sectors;
1947 if (test_bit(R10BIO_MadeGood, &r10_bio->state) ||
1948 test_bit(R10BIO_WriteError, &r10_bio->state))
1949 reschedule_retry(r10_bio);
1950 else
1951 put_buf(r10_bio);
1952 md_done_sync(mddev, s, 1);
1953 break;
1954 } else {
NeilBrown9f2c9d12011-10-11 16:48:43 +11001955 struct r10bio *r10_bio2 = (struct r10bio *)r10_bio->master_bio;
NeilBrown5e570282011-07-28 11:39:25 +10001956 if (test_bit(R10BIO_MadeGood, &r10_bio->state) ||
1957 test_bit(R10BIO_WriteError, &r10_bio->state))
1958 reschedule_retry(r10_bio);
1959 else
1960 put_buf(r10_bio);
1961 r10_bio = r10_bio2;
1962 }
1963 }
1964}
1965
Christoph Hellwig4246a0b2015-07-20 15:29:37 +02001966static void end_sync_write(struct bio *bio)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001967{
Ming Leif0250612017-03-17 00:12:33 +08001968 struct r10bio *r10_bio = get_resync_r10bio(bio);
NeilBrownfd01b882011-10-11 16:47:53 +11001969 struct mddev *mddev = r10_bio->mddev;
NeilBrowne879a872011-10-11 16:49:02 +11001970 struct r10conf *conf = mddev->private;
Namhyung Kim778ca012011-07-18 17:38:47 +10001971 int d;
NeilBrown749c55e2011-07-28 11:39:24 +10001972 sector_t first_bad;
1973 int bad_sectors;
1974 int slot;
NeilBrown9ad1aef2011-12-23 10:17:55 +11001975 int repl;
NeilBrown4ca40c22011-12-23 10:17:55 +11001976 struct md_rdev *rdev = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001977
NeilBrown9ad1aef2011-12-23 10:17:55 +11001978 d = find_bio_disk(conf, r10_bio, bio, &slot, &repl);
1979 if (repl)
1980 rdev = conf->mirrors[d].replacement;
NeilBrown547414d2012-03-13 11:21:20 +11001981 else
NeilBrown9ad1aef2011-12-23 10:17:55 +11001982 rdev = conf->mirrors[d].rdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001983
Christoph Hellwig4e4cbee2017-06-03 09:38:06 +02001984 if (bio->bi_status) {
NeilBrown9ad1aef2011-12-23 10:17:55 +11001985 if (repl)
1986 md_error(mddev, rdev);
1987 else {
1988 set_bit(WriteErrorSeen, &rdev->flags);
NeilBrownb7044d42011-12-23 10:17:56 +11001989 if (!test_and_set_bit(WantReplacement, &rdev->flags))
1990 set_bit(MD_RECOVERY_NEEDED,
1991 &rdev->mddev->recovery);
NeilBrown9ad1aef2011-12-23 10:17:55 +11001992 set_bit(R10BIO_WriteError, &r10_bio->state);
1993 }
1994 } else if (is_badblock(rdev,
NeilBrown749c55e2011-07-28 11:39:24 +10001995 r10_bio->devs[slot].addr,
1996 r10_bio->sectors,
1997 &first_bad, &bad_sectors))
1998 set_bit(R10BIO_MadeGood, &r10_bio->state);
NeilBrowndfc70642008-05-23 13:04:39 -07001999
NeilBrown9ad1aef2011-12-23 10:17:55 +11002000 rdev_dec_pending(rdev, mddev);
NeilBrown5e570282011-07-28 11:39:25 +10002001
2002 end_sync_request(r10_bio);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002003}
2004
2005/*
 2006 * Note: sync and recover are handled very differently for raid10
2007 * This code is for resync.
2008 * For resync, we read through virtual addresses and read all blocks.
2009 * If there is any error, we schedule a write. The lowest numbered
2010 * drive is authoritative.
 2011 * However, requests arrive for physical addresses, so we need to map them.
 2012 * For every physical address there are raid_disks/copies virtual addresses,
 2013 * which is always at least one, but is not necessarily an integer.
2014 * This means that a physical address can span multiple chunks, so we may
2015 * have to submit multiple io requests for a single sync request.
2016 */
2017/*
2018 * We check if all blocks are in-sync and only write to blocks that
2019 * aren't in sync
2020 */
NeilBrown9f2c9d12011-10-11 16:48:43 +11002021static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002022{
NeilBrowne879a872011-10-11 16:49:02 +11002023 struct r10conf *conf = mddev->private;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002024 int i, first;
2025 struct bio *tbio, *fbio;
majianpengf4380a92012-04-12 16:04:47 +10002026 int vcnt;
Ming Leicdb76be2017-03-17 00:12:34 +08002027 struct page **tpages, **fpages;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002028
2029 atomic_set(&r10_bio->remaining, 1);
2030
2031 /* find the first device with a block */
2032 for (i=0; i<conf->copies; i++)
Christoph Hellwig4e4cbee2017-06-03 09:38:06 +02002033 if (!r10_bio->devs[i].bio->bi_status)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002034 break;
2035
2036 if (i == conf->copies)
2037 goto done;
2038
2039 first = i;
2040 fbio = r10_bio->devs[i].bio;
Artur Paszkiewiczcc578582015-12-18 15:19:16 +11002041 fbio->bi_iter.bi_size = r10_bio->sectors << 9;
2042 fbio->bi_iter.bi_idx = 0;
Ming Leicdb76be2017-03-17 00:12:34 +08002043 fpages = get_resync_pages(fbio)->pages;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002044
majianpengf4380a92012-04-12 16:04:47 +10002045 vcnt = (r10_bio->sectors + (PAGE_SIZE >> 9) - 1) >> (PAGE_SHIFT - 9);
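	/*
	 * e.g., assuming 4 KiB pages (illustrative): PAGE_SIZE >> 9 == 8
	 * sectors per page, so 1002 resync sectors give
	 * vcnt == (1002 + 7) >> 3 == 126 vec entries, the last of which is
	 * compared for only 2 * 512 bytes thanks to the len clamp below.
	 */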
Linus Torvalds1da177e2005-04-16 15:20:36 -07002046 /* now find blocks with errors */
NeilBrown0eb3ff12006-01-06 00:20:29 -08002047 for (i=0 ; i < conf->copies ; i++) {
2048 int j, d;
NeilBrown8d3ca832016-11-18 16:16:12 +11002049 struct md_rdev *rdev;
Ming Leif0250612017-03-17 00:12:33 +08002050 struct resync_pages *rp;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002051
Linus Torvalds1da177e2005-04-16 15:20:36 -07002052 tbio = r10_bio->devs[i].bio;
NeilBrown0eb3ff12006-01-06 00:20:29 -08002053
2054 if (tbio->bi_end_io != end_sync_read)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002055 continue;
NeilBrown0eb3ff12006-01-06 00:20:29 -08002056 if (i == first)
2057 continue;
Ming Leicdb76be2017-03-17 00:12:34 +08002058
2059 tpages = get_resync_pages(tbio)->pages;
NeilBrown8d3ca832016-11-18 16:16:12 +11002060 d = r10_bio->devs[i].devnum;
2061 rdev = conf->mirrors[d].rdev;
Christoph Hellwig4e4cbee2017-06-03 09:38:06 +02002062 if (!r10_bio->devs[i].bio->bi_status) {
NeilBrown0eb3ff12006-01-06 00:20:29 -08002063 /* We know that the bi_io_vec layout is the same for
2064 * both 'first' and 'i', so we just compare them.
2065 * All vec entries are PAGE_SIZE;
2066 */
NeilBrown7bb23c42013-07-16 16:50:47 +10002067 int sectors = r10_bio->sectors;
2068 for (j = 0; j < vcnt; j++) {
2069 int len = PAGE_SIZE;
2070 if (sectors < (len / 512))
2071 len = sectors * 512;
Ming Leicdb76be2017-03-17 00:12:34 +08002072 if (memcmp(page_address(fpages[j]),
2073 page_address(tpages[j]),
NeilBrown7bb23c42013-07-16 16:50:47 +10002074 len))
NeilBrown0eb3ff12006-01-06 00:20:29 -08002075 break;
NeilBrown7bb23c42013-07-16 16:50:47 +10002076 sectors -= len/512;
2077 }
NeilBrown0eb3ff12006-01-06 00:20:29 -08002078 if (j == vcnt)
2079 continue;
Jianpeng Ma7f7583d2012-10-11 14:17:59 +11002080 atomic64_add(r10_bio->sectors, &mddev->resync_mismatches);
NeilBrownf84ee362011-07-28 11:39:25 +10002081 if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
2082 /* Don't fix anything. */
2083 continue;
NeilBrown8d3ca832016-11-18 16:16:12 +11002084 } else if (test_bit(FailFast, &rdev->flags)) {
2085 /* Just give up on this device */
2086 md_error(rdev->mddev, rdev);
2087 continue;
NeilBrown0eb3ff12006-01-06 00:20:29 -08002088 }
NeilBrownf84ee362011-07-28 11:39:25 +10002089 /* Ok, we need to write this bio, either to correct an
2090 * inconsistency or to correct an unreadable block.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002091 * First we need to fixup bv_offset, bv_len and
2092 * bi_vecs, as the read request might have corrupted these
2093 */
Ming Leif0250612017-03-17 00:12:33 +08002094 rp = get_resync_pages(tbio);
Kent Overstreet8be185f2012-09-06 14:14:43 -07002095 bio_reset(tbio);
2096
Ming Leifb0eb5d2017-07-14 16:14:43 +08002097 md_bio_reset_resync_pages(tbio, rp, fbio->bi_iter.bi_size);
2098
Ming Leif0250612017-03-17 00:12:33 +08002099 rp->raid_bio = r10_bio;
2100 tbio->bi_private = rp;
Kent Overstreet4f024f32013-10-11 15:44:27 -07002101 tbio->bi_iter.bi_sector = r10_bio->devs[i].addr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002102 tbio->bi_end_io = end_sync_write;
Mike Christie796a5cf2016-06-05 14:32:07 -05002103 bio_set_op_attrs(tbio, REQ_OP_WRITE, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002104
Kent Overstreetc31df252015-05-06 23:34:20 -07002105 bio_copy_data(tbio, fbio);
2106
Linus Torvalds1da177e2005-04-16 15:20:36 -07002107 atomic_inc(&conf->mirrors[d].rdev->nr_pending);
2108 atomic_inc(&r10_bio->remaining);
Kent Overstreetaa8b57a2013-02-05 15:19:29 -08002109 md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(tbio));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002110
NeilBrown1919cbb2016-11-18 16:16:12 +11002111 if (test_bit(FailFast, &conf->mirrors[d].rdev->flags))
2112 tbio->bi_opf |= MD_FAILFAST;
Kent Overstreet4f024f32013-10-11 15:44:27 -07002113 tbio->bi_iter.bi_sector += conf->mirrors[d].rdev->data_offset;
Christoph Hellwig74d46992017-08-23 19:10:32 +02002114 bio_set_dev(tbio, conf->mirrors[d].rdev->bdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002115 generic_make_request(tbio);
2116 }
2117
NeilBrown9ad1aef2011-12-23 10:17:55 +11002118 /* Now write out to any replacement devices
2119 * that are active
2120 */
2121 for (i = 0; i < conf->copies; i++) {
Kent Overstreetc31df252015-05-06 23:34:20 -07002122 int d;
NeilBrown9ad1aef2011-12-23 10:17:55 +11002123
2124 tbio = r10_bio->devs[i].repl_bio;
2125 if (!tbio || !tbio->bi_end_io)
2126 continue;
2127 if (r10_bio->devs[i].bio->bi_end_io != end_sync_write
2128 && r10_bio->devs[i].bio != fbio)
Kent Overstreetc31df252015-05-06 23:34:20 -07002129 bio_copy_data(tbio, fbio);
NeilBrown9ad1aef2011-12-23 10:17:55 +11002130 d = r10_bio->devs[i].devnum;
2131 atomic_inc(&r10_bio->remaining);
2132 md_sync_acct(conf->mirrors[d].replacement->bdev,
Kent Overstreetaa8b57a2013-02-05 15:19:29 -08002133 bio_sectors(tbio));
NeilBrown9ad1aef2011-12-23 10:17:55 +11002134 generic_make_request(tbio);
2135 }
2136
Linus Torvalds1da177e2005-04-16 15:20:36 -07002137done:
2138 if (atomic_dec_and_test(&r10_bio->remaining)) {
2139 md_done_sync(mddev, r10_bio->sectors, 1);
2140 put_buf(r10_bio);
2141 }
2142}
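/*
 * Net effect of sync_request_write() (sketch): the first successfully read
 * copy is authoritative; any copy whose read failed, or whose pages differ
 * from it, is rewritten from that copy (a "check" pass only counts the
 * mismatches), and active replacement devices are always written.
 */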
2143
2144/*
2145 * Now for the recovery code.
2146 * Recovery happens across physical sectors.
 2147 * We recover all non-in_sync drives by finding the virtual address of
 2148 * each, and then choosing a working drive that also has that virt address.
 2149 * There is a separate r10_bio for each non-in_sync drive.
 2150 * Only the first two slots are in use: the first for reading,
 2151 * the second for writing.
2152 *
2153 */
NeilBrown9f2c9d12011-10-11 16:48:43 +11002154static void fix_recovery_read_error(struct r10bio *r10_bio)
NeilBrown5e570282011-07-28 11:39:25 +10002155{
2156 /* We got a read error during recovery.
2157 * We repeat the read in smaller page-sized sections.
2158 * If a read succeeds, write it to the new device or record
2159 * a bad block if we cannot.
2160 * If a read fails, record a bad block on both old and
2161 * new devices.
2162 */
NeilBrownfd01b882011-10-11 16:47:53 +11002163 struct mddev *mddev = r10_bio->mddev;
NeilBrowne879a872011-10-11 16:49:02 +11002164 struct r10conf *conf = mddev->private;
NeilBrown5e570282011-07-28 11:39:25 +10002165 struct bio *bio = r10_bio->devs[0].bio;
2166 sector_t sect = 0;
2167 int sectors = r10_bio->sectors;
2168 int idx = 0;
2169 int dr = r10_bio->devs[0].devnum;
2170 int dw = r10_bio->devs[1].devnum;
Ming Leicdb76be2017-03-17 00:12:34 +08002171 struct page **pages = get_resync_pages(bio)->pages;
NeilBrown5e570282011-07-28 11:39:25 +10002172
2173 while (sectors) {
2174 int s = sectors;
NeilBrown3cb03002011-10-11 16:45:26 +11002175 struct md_rdev *rdev;
NeilBrown5e570282011-07-28 11:39:25 +10002176 sector_t addr;
2177 int ok;
2178
2179 if (s > (PAGE_SIZE>>9))
2180 s = PAGE_SIZE >> 9;
2181
2182 rdev = conf->mirrors[dr].rdev;
2183 addr = r10_bio->devs[0].addr + sect,
2184 ok = sync_page_io(rdev,
2185 addr,
2186 s << 9,
Ming Leicdb76be2017-03-17 00:12:34 +08002187 pages[idx],
Mike Christie796a5cf2016-06-05 14:32:07 -05002188 REQ_OP_READ, 0, false);
NeilBrown5e570282011-07-28 11:39:25 +10002189 if (ok) {
2190 rdev = conf->mirrors[dw].rdev;
2191 addr = r10_bio->devs[1].addr + sect;
2192 ok = sync_page_io(rdev,
2193 addr,
2194 s << 9,
Ming Leicdb76be2017-03-17 00:12:34 +08002195 pages[idx],
Mike Christie796a5cf2016-06-05 14:32:07 -05002196 REQ_OP_WRITE, 0, false);
NeilBrownb7044d42011-12-23 10:17:56 +11002197 if (!ok) {
NeilBrown5e570282011-07-28 11:39:25 +10002198 set_bit(WriteErrorSeen, &rdev->flags);
NeilBrownb7044d42011-12-23 10:17:56 +11002199 if (!test_and_set_bit(WantReplacement,
2200 &rdev->flags))
2201 set_bit(MD_RECOVERY_NEEDED,
2202 &rdev->mddev->recovery);
2203 }
NeilBrown5e570282011-07-28 11:39:25 +10002204 }
2205 if (!ok) {
2206 /* We don't worry if we cannot set a bad block -
2207 * it really is bad so there is no loss in not
2208 * recording it yet
2209 */
2210 rdev_set_badblocks(rdev, addr, s, 0);
2211
2212 if (rdev != conf->mirrors[dw].rdev) {
2213 /* need bad block on destination too */
NeilBrown3cb03002011-10-11 16:45:26 +11002214 struct md_rdev *rdev2 = conf->mirrors[dw].rdev;
NeilBrown5e570282011-07-28 11:39:25 +10002215 addr = r10_bio->devs[1].addr + sect;
2216 ok = rdev_set_badblocks(rdev2, addr, s, 0);
2217 if (!ok) {
2218 /* just abort the recovery */
NeilBrown08464e02016-11-02 14:16:50 +11002219 pr_notice("md/raid10:%s: recovery aborted due to read error\n",
2220 mdname(mddev));
NeilBrown5e570282011-07-28 11:39:25 +10002221
2222 conf->mirrors[dw].recovery_disabled
2223 = mddev->recovery_disabled;
2224 set_bit(MD_RECOVERY_INTR,
2225 &mddev->recovery);
2226 break;
2227 }
2228 }
2229 }
2230
2231 sectors -= s;
2232 sect += s;
2233 idx++;
2234 }
2235}
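/*
 * The loop above works in page-sized steps: with 4 KiB pages (illustrative)
 * a 128-sector r10_bio is recovered in 16 rounds of s == 8 sectors, each
 * read from devs[0] and, on success, written to devs[1]; failures are
 * recorded as bad blocks, and recovery is aborted only if even that fails
 * on the destination.
 */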
Linus Torvalds1da177e2005-04-16 15:20:36 -07002236
NeilBrown9f2c9d12011-10-11 16:48:43 +11002237static void recovery_request_write(struct mddev *mddev, struct r10bio *r10_bio)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002238{
NeilBrowne879a872011-10-11 16:49:02 +11002239 struct r10conf *conf = mddev->private;
Namhyung Kimc65060a2011-07-18 17:38:49 +10002240 int d;
NeilBrown24afd802011-12-23 10:17:55 +11002241 struct bio *wbio, *wbio2;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002242
NeilBrown5e570282011-07-28 11:39:25 +10002243 if (!test_bit(R10BIO_Uptodate, &r10_bio->state)) {
2244 fix_recovery_read_error(r10_bio);
2245 end_sync_request(r10_bio);
2246 return;
2247 }
2248
Namhyung Kimc65060a2011-07-18 17:38:49 +10002249 /*
2250 * share the pages with the first bio
Linus Torvalds1da177e2005-04-16 15:20:36 -07002251 * and submit the write request
2252 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002253 d = r10_bio->devs[1].devnum;
NeilBrown24afd802011-12-23 10:17:55 +11002254 wbio = r10_bio->devs[1].bio;
2255 wbio2 = r10_bio->devs[1].repl_bio;
NeilBrown0eb25bb2013-07-24 15:37:42 +10002256 /* Need to test wbio2->bi_end_io before we call
2257 * generic_make_request as if the former is NULL,
2258 * the latter is free to free wbio2.
2259 */
2260 if (wbio2 && !wbio2->bi_end_io)
2261 wbio2 = NULL;
NeilBrown24afd802011-12-23 10:17:55 +11002262 if (wbio->bi_end_io) {
2263 atomic_inc(&conf->mirrors[d].rdev->nr_pending);
Kent Overstreetaa8b57a2013-02-05 15:19:29 -08002264 md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(wbio));
NeilBrown24afd802011-12-23 10:17:55 +11002265 generic_make_request(wbio);
2266 }
NeilBrown0eb25bb2013-07-24 15:37:42 +10002267 if (wbio2) {
NeilBrown24afd802011-12-23 10:17:55 +11002268 atomic_inc(&conf->mirrors[d].replacement->nr_pending);
2269 md_sync_acct(conf->mirrors[d].replacement->bdev,
Kent Overstreetaa8b57a2013-02-05 15:19:29 -08002270 bio_sectors(wbio2));
NeilBrown24afd802011-12-23 10:17:55 +11002271 generic_make_request(wbio2);
2272 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002273}
2274
Linus Torvalds1da177e2005-04-16 15:20:36 -07002275/*
Robert Becker1e509152009-12-14 12:49:58 +11002276 * Used by fix_read_error() to decay the per rdev read_errors.
2277 * We halve the read error count for every hour that has elapsed
2278 * since the last recorded read error.
2279 *
2280 */
NeilBrownfd01b882011-10-11 16:47:53 +11002281static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
Robert Becker1e509152009-12-14 12:49:58 +11002282{
Arnd Bergmann0e3ef492016-06-17 17:33:10 +02002283 long cur_time_mon;
Robert Becker1e509152009-12-14 12:49:58 +11002284 unsigned long hours_since_last;
2285 unsigned int read_errors = atomic_read(&rdev->read_errors);
2286
Arnd Bergmann0e3ef492016-06-17 17:33:10 +02002287 cur_time_mon = ktime_get_seconds();
Robert Becker1e509152009-12-14 12:49:58 +11002288
Arnd Bergmann0e3ef492016-06-17 17:33:10 +02002289 if (rdev->last_read_error == 0) {
Robert Becker1e509152009-12-14 12:49:58 +11002290 /* first time we've seen a read error */
2291 rdev->last_read_error = cur_time_mon;
2292 return;
2293 }
2294
Arnd Bergmann0e3ef492016-06-17 17:33:10 +02002295 hours_since_last = (long)(cur_time_mon -
2296 rdev->last_read_error) / 3600;
Robert Becker1e509152009-12-14 12:49:58 +11002297
2298 rdev->last_read_error = cur_time_mon;
2299
2300 /*
2301 * if hours_since_last is > the number of bits in read_errors
2302 * just set read errors to 0. We do this to avoid
2303 * overflowing the shift of read_errors by hours_since_last.
2304 */
2305 if (hours_since_last >= 8 * sizeof(read_errors))
2306 atomic_set(&rdev->read_errors, 0);
2307 else
2308 atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
2309}
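/*
 * Decay example (illustrative): with read_errors == 40 and the last error
 * three hours ago, the counter becomes 40 >> 3 == 5; once 32 or more
 * error-free hours have passed (the width of the counter) it is simply
 * reset to 0 rather than shifted.
 */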
2310
NeilBrown3cb03002011-10-11 16:45:26 +11002311static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
NeilBrown58c54fc2011-07-28 11:39:25 +10002312 int sectors, struct page *page, int rw)
2313{
2314 sector_t first_bad;
2315 int bad_sectors;
2316
2317 if (is_badblock(rdev, sector, sectors, &first_bad, &bad_sectors)
2318 && (rw == READ || test_bit(WriteErrorSeen, &rdev->flags)))
2319 return -1;
Mike Christie796a5cf2016-06-05 14:32:07 -05002320 if (sync_page_io(rdev, sector, sectors << 9, page, rw, 0, false))
NeilBrown58c54fc2011-07-28 11:39:25 +10002321 /* success */
2322 return 1;
NeilBrownb7044d42011-12-23 10:17:56 +11002323 if (rw == WRITE) {
NeilBrown58c54fc2011-07-28 11:39:25 +10002324 set_bit(WriteErrorSeen, &rdev->flags);
NeilBrownb7044d42011-12-23 10:17:56 +11002325 if (!test_and_set_bit(WantReplacement, &rdev->flags))
2326 set_bit(MD_RECOVERY_NEEDED,
2327 &rdev->mddev->recovery);
2328 }
NeilBrown58c54fc2011-07-28 11:39:25 +10002329 /* need to record an error - either for the block or the device */
2330 if (!rdev_set_badblocks(rdev, sector, sectors, 0))
2331 md_error(rdev->mddev, rdev);
2332 return 0;
2333}
2334
Robert Becker1e509152009-12-14 12:49:58 +11002335/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002336 * This is a kernel thread which:
2337 *
2338 * 1. Retries failed read operations on working mirrors.
 2339 * 2. Updates the raid superblock when problems are encountered.
NeilBrown6814d532006-10-03 01:15:45 -07002340 * 3. Performs writes following reads for array synchronising.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002341 */
2342
NeilBrowne879a872011-10-11 16:49:02 +11002343static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10bio *r10_bio)
NeilBrown6814d532006-10-03 01:15:45 -07002344{
2345 int sect = 0; /* Offset from r10_bio->sector */
2346 int sectors = r10_bio->sectors;
Yufen Yu13db16d2018-04-23 17:37:30 +08002347 struct md_rdev *rdev;
Robert Becker1e509152009-12-14 12:49:58 +11002348 int max_read_errors = atomic_read(&mddev->max_corr_read_errors);
Prasanna S. Panchamukhi0544a212010-06-24 13:31:03 +10002349 int d = r10_bio->devs[r10_bio->read_slot].devnum;
Robert Becker1e509152009-12-14 12:49:58 +11002350
NeilBrown7c4e06f2011-05-11 14:53:17 +10002351 /* still own a reference to this rdev, so it cannot
2352 * have been cleared recently.
2353 */
2354 rdev = conf->mirrors[d].rdev;
Robert Becker1e509152009-12-14 12:49:58 +11002355
NeilBrown7c4e06f2011-05-11 14:53:17 +10002356 if (test_bit(Faulty, &rdev->flags))
2357 /* drive has already been failed, just ignore any
2358 more fix_read_error() attempts */
2359 return;
2360
2361 check_decay_read_errors(mddev, rdev);
2362 atomic_inc(&rdev->read_errors);
2363 if (atomic_read(&rdev->read_errors) > max_read_errors) {
2364 char b[BDEVNAME_SIZE];
Robert Becker1e509152009-12-14 12:49:58 +11002365 bdevname(rdev->bdev, b);
2366
NeilBrown08464e02016-11-02 14:16:50 +11002367 pr_notice("md/raid10:%s: %s: Raid device exceeded read_error threshold [cur %d:max %d]\n",
2368 mdname(mddev), b,
2369 atomic_read(&rdev->read_errors), max_read_errors);
2370 pr_notice("md/raid10:%s: %s: Failing raid device\n",
2371 mdname(mddev), b);
NeilBrownd683c8e2016-06-02 16:19:52 +10002372 md_error(mddev, rdev);
NeilBrownfae8cc5e2012-02-14 11:10:10 +11002373 r10_bio->devs[r10_bio->read_slot].bio = IO_BLOCKED;
NeilBrown7c4e06f2011-05-11 14:53:17 +10002374 return;
Robert Becker1e509152009-12-14 12:49:58 +11002375 }
Robert Becker1e509152009-12-14 12:49:58 +11002376
NeilBrown6814d532006-10-03 01:15:45 -07002377 while(sectors) {
2378 int s = sectors;
2379 int sl = r10_bio->read_slot;
2380 int success = 0;
2381 int start;
2382
2383 if (s > (PAGE_SIZE>>9))
2384 s = PAGE_SIZE >> 9;
2385
2386 rcu_read_lock();
2387 do {
NeilBrown8dbed5c2011-07-28 11:39:24 +10002388 sector_t first_bad;
2389 int bad_sectors;
2390
Prasanna S. Panchamukhi0544a212010-06-24 13:31:03 +10002391 d = r10_bio->devs[sl].devnum;
NeilBrown6814d532006-10-03 01:15:45 -07002392 rdev = rcu_dereference(conf->mirrors[d].rdev);
2393 if (rdev &&
NeilBrown8dbed5c2011-07-28 11:39:24 +10002394 test_bit(In_sync, &rdev->flags) &&
NeilBrownf5b67ae2016-06-02 16:19:53 +10002395 !test_bit(Faulty, &rdev->flags) &&
NeilBrown8dbed5c2011-07-28 11:39:24 +10002396 is_badblock(rdev, r10_bio->devs[sl].addr + sect, s,
2397 &first_bad, &bad_sectors) == 0) {
NeilBrown6814d532006-10-03 01:15:45 -07002398 atomic_inc(&rdev->nr_pending);
2399 rcu_read_unlock();
NeilBrown2b193362010-10-27 15:16:40 +11002400 success = sync_page_io(rdev,
NeilBrown6814d532006-10-03 01:15:45 -07002401 r10_bio->devs[sl].addr +
Jonathan Brassowccebd4c2011-01-14 09:14:33 +11002402 sect,
NeilBrown6814d532006-10-03 01:15:45 -07002403 s<<9,
Mike Christie796a5cf2016-06-05 14:32:07 -05002404 conf->tmppage,
2405 REQ_OP_READ, 0, false);
NeilBrown6814d532006-10-03 01:15:45 -07002406 rdev_dec_pending(rdev, mddev);
2407 rcu_read_lock();
2408 if (success)
2409 break;
2410 }
2411 sl++;
2412 if (sl == conf->copies)
2413 sl = 0;
2414 } while (!success && sl != r10_bio->read_slot);
2415 rcu_read_unlock();
2416
2417 if (!success) {
NeilBrown58c54fc2011-07-28 11:39:25 +10002418 /* Cannot read from anywhere, just mark the block
2419 * as bad on the first device to discourage future
2420 * reads.
2421 */
NeilBrown6814d532006-10-03 01:15:45 -07002422 int dn = r10_bio->devs[r10_bio->read_slot].devnum;
NeilBrown58c54fc2011-07-28 11:39:25 +10002423 rdev = conf->mirrors[dn].rdev;
2424
2425 if (!rdev_set_badblocks(
2426 rdev,
2427 r10_bio->devs[r10_bio->read_slot].addr
2428 + sect,
NeilBrownfae8cc5e2012-02-14 11:10:10 +11002429 s, 0)) {
NeilBrown58c54fc2011-07-28 11:39:25 +10002430 md_error(mddev, rdev);
NeilBrownfae8cc5e2012-02-14 11:10:10 +11002431 r10_bio->devs[r10_bio->read_slot].bio
2432 = IO_BLOCKED;
2433 }
NeilBrown6814d532006-10-03 01:15:45 -07002434 break;
2435 }
2436
2437 start = sl;
2438 /* write it back and re-read */
2439 rcu_read_lock();
2440 while (sl != r10_bio->read_slot) {
Robert Becker67b8dc42009-12-14 12:49:57 +11002441 char b[BDEVNAME_SIZE];
Prasanna S. Panchamukhi0544a212010-06-24 13:31:03 +10002442
NeilBrown6814d532006-10-03 01:15:45 -07002443 if (sl==0)
2444 sl = conf->copies;
2445 sl--;
2446 d = r10_bio->devs[sl].devnum;
2447 rdev = rcu_dereference(conf->mirrors[d].rdev);
NeilBrown1294b9c2011-07-28 11:39:23 +10002448 if (!rdev ||
NeilBrownf5b67ae2016-06-02 16:19:53 +10002449 test_bit(Faulty, &rdev->flags) ||
NeilBrown1294b9c2011-07-28 11:39:23 +10002450 !test_bit(In_sync, &rdev->flags))
2451 continue;
2452
2453 atomic_inc(&rdev->nr_pending);
2454 rcu_read_unlock();
NeilBrown58c54fc2011-07-28 11:39:25 +10002455 if (r10_sync_page_io(rdev,
2456 r10_bio->devs[sl].addr +
2457 sect,
NeilBrown055d3742012-07-03 15:55:33 +10002458 s, conf->tmppage, WRITE)
NeilBrown1294b9c2011-07-28 11:39:23 +10002459 == 0) {
2460 /* Well, this device is dead */
NeilBrown08464e02016-11-02 14:16:50 +11002461 pr_notice("md/raid10:%s: read correction write failed (%d sectors at %llu on %s)\n",
2462 mdname(mddev), s,
2463 (unsigned long long)(
2464 sect +
2465 choose_data_offset(r10_bio,
2466 rdev)),
2467 bdevname(rdev->bdev, b));
2468 pr_notice("md/raid10:%s: %s: failing drive\n",
2469 mdname(mddev),
2470 bdevname(rdev->bdev, b));
NeilBrown6814d532006-10-03 01:15:45 -07002471 }
NeilBrown1294b9c2011-07-28 11:39:23 +10002472 rdev_dec_pending(rdev, mddev);
2473 rcu_read_lock();
NeilBrown6814d532006-10-03 01:15:45 -07002474 }
2475 sl = start;
2476 while (sl != r10_bio->read_slot) {
NeilBrown1294b9c2011-07-28 11:39:23 +10002477 char b[BDEVNAME_SIZE];
Prasanna S. Panchamukhi0544a212010-06-24 13:31:03 +10002478
NeilBrown6814d532006-10-03 01:15:45 -07002479 if (sl==0)
2480 sl = conf->copies;
2481 sl--;
2482 d = r10_bio->devs[sl].devnum;
2483 rdev = rcu_dereference(conf->mirrors[d].rdev);
NeilBrown1294b9c2011-07-28 11:39:23 +10002484 if (!rdev ||
NeilBrownf5b67ae2016-06-02 16:19:53 +10002485 test_bit(Faulty, &rdev->flags) ||
NeilBrown1294b9c2011-07-28 11:39:23 +10002486 !test_bit(In_sync, &rdev->flags))
2487 continue;
Robert Becker67b8dc42009-12-14 12:49:57 +11002488
NeilBrown1294b9c2011-07-28 11:39:23 +10002489 atomic_inc(&rdev->nr_pending);
2490 rcu_read_unlock();
NeilBrown58c54fc2011-07-28 11:39:25 +10002491 switch (r10_sync_page_io(rdev,
2492 r10_bio->devs[sl].addr +
2493 sect,
NeilBrown055d3742012-07-03 15:55:33 +10002494 s, conf->tmppage,
NeilBrown58c54fc2011-07-28 11:39:25 +10002495 READ)) {
2496 case 0:
NeilBrown1294b9c2011-07-28 11:39:23 +10002497 /* Well, this device is dead */
NeilBrown08464e02016-11-02 14:16:50 +11002498 pr_notice("md/raid10:%s: unable to read back corrected sectors (%d sectors at %llu on %s)\n",
NeilBrown1294b9c2011-07-28 11:39:23 +10002499 mdname(mddev), s,
2500 (unsigned long long)(
NeilBrownf8c9e742012-05-21 09:28:33 +10002501 sect +
2502 choose_data_offset(r10_bio, rdev)),
NeilBrown1294b9c2011-07-28 11:39:23 +10002503 bdevname(rdev->bdev, b));
NeilBrown08464e02016-11-02 14:16:50 +11002504 pr_notice("md/raid10:%s: %s: failing drive\n",
NeilBrown1294b9c2011-07-28 11:39:23 +10002505 mdname(mddev),
2506 bdevname(rdev->bdev, b));
NeilBrown58c54fc2011-07-28 11:39:25 +10002507 break;
2508 case 1:
NeilBrown08464e02016-11-02 14:16:50 +11002509 pr_info("md/raid10:%s: read error corrected (%d sectors at %llu on %s)\n",
NeilBrown1294b9c2011-07-28 11:39:23 +10002510 mdname(mddev), s,
2511 (unsigned long long)(
NeilBrownf8c9e742012-05-21 09:28:33 +10002512 sect +
2513 choose_data_offset(r10_bio, rdev)),
NeilBrown1294b9c2011-07-28 11:39:23 +10002514 bdevname(rdev->bdev, b));
2515 atomic_add(s, &rdev->corrected_errors);
NeilBrown6814d532006-10-03 01:15:45 -07002516 }
NeilBrown1294b9c2011-07-28 11:39:23 +10002517
2518 rdev_dec_pending(rdev, mddev);
2519 rcu_read_lock();
NeilBrown6814d532006-10-03 01:15:45 -07002520 }
2521 rcu_read_unlock();
2522
2523 sectors -= s;
2524 sect += s;
2525 }
2526}
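/*
 * Schematic of the loop above, for each PAGE_SIZE-sized piece of the
 * failed range:
 *
 *   1) try the other copies in turn until one read of the piece succeeds;
 *      if none does, mark the piece bad on the original device;
 *   2) write the good data back to every other in-sync copy with
 *      r10_sync_page_io(..., WRITE), failing drives whose write fails;
 *   3) re-read each of those copies with r10_sync_page_io(..., READ) to
 *      confirm the rewrite took, accounting corrected_errors on success.
 */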
2527
NeilBrown9f2c9d12011-10-11 16:48:43 +11002528static int narrow_write_error(struct r10bio *r10_bio, int i)
NeilBrownbd870a12011-07-28 11:39:24 +10002529{
2530 struct bio *bio = r10_bio->master_bio;
NeilBrownfd01b882011-10-11 16:47:53 +11002531 struct mddev *mddev = r10_bio->mddev;
NeilBrowne879a872011-10-11 16:49:02 +11002532 struct r10conf *conf = mddev->private;
NeilBrown3cb03002011-10-11 16:45:26 +11002533 struct md_rdev *rdev = conf->mirrors[r10_bio->devs[i].devnum].rdev;
NeilBrownbd870a12011-07-28 11:39:24 +10002534 /* bio has the data to be written to slot 'i' where
2535 * we just recently had a write error.
2536 * We repeatedly clone the bio and trim down to one block,
2537 * then try the write. Where the write fails we record
2538 * a bad block.
2539 * It is conceivable that the bio doesn't exactly align with
2540 * blocks. We must handle this.
2541 *
2542 * We currently own a reference to the rdev.
2543 */
2544
2545 int block_sectors;
2546 sector_t sector;
2547 int sectors;
2548 int sect_to_write = r10_bio->sectors;
2549 int ok = 1;
2550
2551 if (rdev->badblocks.shift < 0)
2552 return 0;
2553
NeilBrownf04ebb02015-02-16 14:51:54 +11002554 block_sectors = roundup(1 << rdev->badblocks.shift,
2555 bdev_logical_block_size(rdev->bdev) >> 9);
NeilBrownbd870a12011-07-28 11:39:24 +10002556 sector = r10_bio->sector;
2557 sectors = ((r10_bio->sector + block_sectors)
2558 & ~(sector_t)(block_sectors - 1))
2559 - sector;
2560
2561 while (sect_to_write) {
2562 struct bio *wbio;
Tomasz Majchrzak27028622016-08-23 10:53:57 +02002563 sector_t wsector;
NeilBrownbd870a12011-07-28 11:39:24 +10002564 if (sectors > sect_to_write)
2565 sectors = sect_to_write;
2566 /* Write at 'sector' for 'sectors' */
Kent Overstreetafeee512018-05-20 18:25:52 -04002567 wbio = bio_clone_fast(bio, GFP_NOIO, &mddev->bio_set);
Kent Overstreet4f024f32013-10-11 15:44:27 -07002568 bio_trim(wbio, sector - bio->bi_iter.bi_sector, sectors);
Tomasz Majchrzak27028622016-08-23 10:53:57 +02002569 wsector = r10_bio->devs[i].addr + (sector - r10_bio->sector);
2570 wbio->bi_iter.bi_sector = wsector +
2571 choose_data_offset(r10_bio, rdev);
Christoph Hellwig74d46992017-08-23 19:10:32 +02002572 bio_set_dev(wbio, rdev->bdev);
Mike Christie796a5cf2016-06-05 14:32:07 -05002573 bio_set_op_attrs(wbio, REQ_OP_WRITE, 0);
Mike Christie4e49ea42016-06-05 14:31:41 -05002574
2575 if (submit_bio_wait(wbio) < 0)
NeilBrownbd870a12011-07-28 11:39:24 +10002576 /* Failure! */
Tomasz Majchrzak27028622016-08-23 10:53:57 +02002577 ok = rdev_set_badblocks(rdev, wsector,
NeilBrownbd870a12011-07-28 11:39:24 +10002578 sectors, 0)
2579 && ok;
2580
2581 bio_put(wbio);
2582 sect_to_write -= sectors;
2583 sector += sectors;
2584 sectors = block_sectors;
2585 }
2586 return ok;
2587}
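/*
 * Illustrative sketch (kept under #if 0, not driver code) of how the loop
 * above carves the failed range into badblock-granular pieces: the first
 * piece runs up to the next block_sectors-aligned boundary, every later
 * piece is a full block.  The function name and types are hypothetical.
 */
#if 0
static void carve_into_blocks(unsigned long long sector, int sect_to_write,
			      int block_sectors)
{
	int sectors = ((sector + block_sectors)
		       & ~(unsigned long long)(block_sectors - 1)) - sector;

	while (sect_to_write) {
		if (sectors > sect_to_write)
			sectors = sect_to_write;
		/* clone the bio, trim it to 'sectors', submit and wait here */
		sect_to_write -= sectors;
		sector += sectors;
		sectors = block_sectors;	/* later pieces are aligned */
	}
}
#endif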
2588
NeilBrown9f2c9d12011-10-11 16:48:43 +11002589static void handle_read_error(struct mddev *mddev, struct r10bio *r10_bio)
NeilBrown560f8e52011-07-28 11:39:23 +10002590{
2591 int slot = r10_bio->read_slot;
NeilBrown560f8e52011-07-28 11:39:23 +10002592 struct bio *bio;
NeilBrowne879a872011-10-11 16:49:02 +11002593 struct r10conf *conf = mddev->private;
NeilBrownabbf0982011-12-23 10:17:54 +11002594 struct md_rdev *rdev = r10_bio->devs[slot].rdev;
NeilBrown560f8e52011-07-28 11:39:23 +10002595
2596 /* we got a read error. Maybe the drive is bad. Maybe just
2597 * the block and we can fix it.
2598 * We freeze all other IO, and try reading the block from
2599 * other devices. When we find one, we re-write
2600 * and re-check to confirm that this fixes the read error.
2601 * This is all done synchronously while the array is
2602 * frozen.
2603 */
NeilBrownfae8cc5e2012-02-14 11:10:10 +11002604 bio = r10_bio->devs[slot].bio;
NeilBrownfae8cc5e2012-02-14 11:10:10 +11002605 bio_put(bio);
2606 r10_bio->devs[slot].bio = NULL;
2607
NeilBrown8d3ca832016-11-18 16:16:12 +11002608 if (mddev->ro)
2609 r10_bio->devs[slot].bio = IO_BLOCKED;
2610 else if (!test_bit(FailFast, &rdev->flags)) {
NeilBrowne2d59922013-06-12 11:01:22 +10002611 freeze_array(conf, 1);
NeilBrown560f8e52011-07-28 11:39:23 +10002612 fix_read_error(conf, mddev, r10_bio);
2613 unfreeze_array(conf);
NeilBrownfae8cc5e2012-02-14 11:10:10 +11002614 } else
NeilBrown8d3ca832016-11-18 16:16:12 +11002615 md_error(mddev, rdev);
NeilBrownfae8cc5e2012-02-14 11:10:10 +11002616
NeilBrownabbf0982011-12-23 10:17:54 +11002617 rdev_dec_pending(rdev, mddev);
NeilBrown545250f2017-04-05 14:05:51 +10002618 allow_barrier(conf);
2619 r10_bio->state = 0;
2620 raid10_read_request(mddev, r10_bio->master_bio, r10_bio);
NeilBrown560f8e52011-07-28 11:39:23 +10002621}
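/*
 * Decision taken above for a failed read:
 *   - on a read-only array the slot is simply marked IO_BLOCKED;
 *   - on a device without FailFast the array is frozen and
 *     fix_read_error() tries to repair the block in place;
 *   - with FailFast set the device is failed immediately via md_error().
 * In every case the read is then retried through raid10_read_request().
 */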
2622
NeilBrowne879a872011-10-11 16:49:02 +11002623static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio)
NeilBrown749c55e2011-07-28 11:39:24 +10002624{
2625 /* Some sort of write request has finished and it
2626 * succeeded in writing where we thought there was a
2627 * bad block. So forget the bad block.
NeilBrown1a0b7cd2011-07-28 11:39:25 +10002628 * Or possibly it failed, and we need to record
2629 * a bad block.
NeilBrown749c55e2011-07-28 11:39:24 +10002630 */
2631 int m;
NeilBrown3cb03002011-10-11 16:45:26 +11002632 struct md_rdev *rdev;
NeilBrown749c55e2011-07-28 11:39:24 +10002633
2634 if (test_bit(R10BIO_IsSync, &r10_bio->state) ||
2635 test_bit(R10BIO_IsRecover, &r10_bio->state)) {
NeilBrown1a0b7cd2011-07-28 11:39:25 +10002636 for (m = 0; m < conf->copies; m++) {
2637 int dev = r10_bio->devs[m].devnum;
2638 rdev = conf->mirrors[dev].rdev;
Yufen Yu01a69ca2018-02-06 17:39:15 +08002639 if (r10_bio->devs[m].bio == NULL ||
2640 r10_bio->devs[m].bio->bi_end_io == NULL)
NeilBrown1a0b7cd2011-07-28 11:39:25 +10002641 continue;
Christoph Hellwig4e4cbee2017-06-03 09:38:06 +02002642 if (!r10_bio->devs[m].bio->bi_status) {
NeilBrown749c55e2011-07-28 11:39:24 +10002643 rdev_clear_badblocks(
2644 rdev,
2645 r10_bio->devs[m].addr,
NeilBrownc6563a82012-05-21 09:27:00 +10002646 r10_bio->sectors, 0);
NeilBrown1a0b7cd2011-07-28 11:39:25 +10002647 } else {
2648 if (!rdev_set_badblocks(
2649 rdev,
2650 r10_bio->devs[m].addr,
2651 r10_bio->sectors, 0))
2652 md_error(conf->mddev, rdev);
NeilBrown749c55e2011-07-28 11:39:24 +10002653 }
NeilBrown9ad1aef2011-12-23 10:17:55 +11002654 rdev = conf->mirrors[dev].replacement;
Yufen Yu01a69ca2018-02-06 17:39:15 +08002655 if (r10_bio->devs[m].repl_bio == NULL ||
2656 r10_bio->devs[m].repl_bio->bi_end_io == NULL)
NeilBrown9ad1aef2011-12-23 10:17:55 +11002657 continue;
Christoph Hellwig4246a0b2015-07-20 15:29:37 +02002658
Christoph Hellwig4e4cbee2017-06-03 09:38:06 +02002659 if (!r10_bio->devs[m].repl_bio->bi_status) {
NeilBrown9ad1aef2011-12-23 10:17:55 +11002660 rdev_clear_badblocks(
2661 rdev,
2662 r10_bio->devs[m].addr,
NeilBrownc6563a82012-05-21 09:27:00 +10002663 r10_bio->sectors, 0);
NeilBrown9ad1aef2011-12-23 10:17:55 +11002664 } else {
2665 if (!rdev_set_badblocks(
2666 rdev,
2667 r10_bio->devs[m].addr,
2668 r10_bio->sectors, 0))
2669 md_error(conf->mddev, rdev);
2670 }
NeilBrown1a0b7cd2011-07-28 11:39:25 +10002671 }
NeilBrown749c55e2011-07-28 11:39:24 +10002672 put_buf(r10_bio);
2673 } else {
NeilBrown95af5872015-08-14 11:26:17 +10002674 bool fail = false;
NeilBrownbd870a12011-07-28 11:39:24 +10002675 for (m = 0; m < conf->copies; m++) {
2676 int dev = r10_bio->devs[m].devnum;
2677 struct bio *bio = r10_bio->devs[m].bio;
2678 rdev = conf->mirrors[dev].rdev;
2679 if (bio == IO_MADE_GOOD) {
NeilBrown749c55e2011-07-28 11:39:24 +10002680 rdev_clear_badblocks(
2681 rdev,
2682 r10_bio->devs[m].addr,
NeilBrownc6563a82012-05-21 09:27:00 +10002683 r10_bio->sectors, 0);
NeilBrown749c55e2011-07-28 11:39:24 +10002684 rdev_dec_pending(rdev, conf->mddev);
Christoph Hellwig4e4cbee2017-06-03 09:38:06 +02002685 } else if (bio != NULL && bio->bi_status) {
NeilBrown95af5872015-08-14 11:26:17 +10002686 fail = true;
NeilBrownbd870a12011-07-28 11:39:24 +10002687 if (!narrow_write_error(r10_bio, m)) {
2688 md_error(conf->mddev, rdev);
2689 set_bit(R10BIO_Degraded,
2690 &r10_bio->state);
2691 }
2692 rdev_dec_pending(rdev, conf->mddev);
NeilBrown749c55e2011-07-28 11:39:24 +10002693 }
NeilBrown475b0322011-12-23 10:17:55 +11002694 bio = r10_bio->devs[m].repl_bio;
2695 rdev = conf->mirrors[dev].replacement;
NeilBrown4ca40c22011-12-23 10:17:55 +11002696 if (rdev && bio == IO_MADE_GOOD) {
NeilBrown475b0322011-12-23 10:17:55 +11002697 rdev_clear_badblocks(
2698 rdev,
2699 r10_bio->devs[m].addr,
NeilBrownc6563a82012-05-21 09:27:00 +10002700 r10_bio->sectors, 0);
NeilBrown475b0322011-12-23 10:17:55 +11002701 rdev_dec_pending(rdev, conf->mddev);
2702 }
NeilBrownbd870a12011-07-28 11:39:24 +10002703 }
NeilBrown95af5872015-08-14 11:26:17 +10002704 if (fail) {
2705 spin_lock_irq(&conf->device_lock);
2706 list_add(&r10_bio->retry_list, &conf->bio_end_io_list);
Shaohua Li23ddba82016-03-14 11:49:32 -07002707 conf->nr_queued++;
NeilBrown95af5872015-08-14 11:26:17 +10002708 spin_unlock_irq(&conf->device_lock);
Guoqing Jiangcf25ae72017-04-17 17:11:05 +08002709 /*
2710 * In case freeze_array() is waiting for condition
2711 * nr_pending == nr_queued + extra to be true.
2712 */
2713 wake_up(&conf->wait_barrier);
NeilBrown95af5872015-08-14 11:26:17 +10002714 md_wakeup_thread(conf->mddev->thread);
NeilBrownc3407022015-10-24 16:23:48 +11002715 } else {
2716 if (test_bit(R10BIO_WriteError,
2717 &r10_bio->state))
2718 close_write(r10_bio);
NeilBrown95af5872015-08-14 11:26:17 +10002719 raid_end_bio_io(r10_bio);
NeilBrownc3407022015-10-24 16:23:48 +11002720 }
NeilBrown749c55e2011-07-28 11:39:24 +10002721 }
2722}
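/*
 * Summary of the two paths above:
 *   - sync/recovery r10_bios: for each copy (and its replacement), a clean
 *     completion clears any badblock record, a failed one records a new
 *     bad block or fails the device, then the buffer goes back via put_buf();
 *   - normal writes: IO_MADE_GOOD clears the recorded bad block, a failed
 *     bio is rewritten block-by-block through narrow_write_error(), and if
 *     anything failed the r10_bio is queued on bio_end_io_list so raid10d
 *     can complete it once the superblock update is no longer pending.
 */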
2723
Shaohua Li4ed87312012-10-11 13:34:00 +11002724static void raid10d(struct md_thread *thread)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002725{
Shaohua Li4ed87312012-10-11 13:34:00 +11002726 struct mddev *mddev = thread->mddev;
NeilBrown9f2c9d12011-10-11 16:48:43 +11002727 struct r10bio *r10_bio;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002728 unsigned long flags;
NeilBrowne879a872011-10-11 16:49:02 +11002729 struct r10conf *conf = mddev->private;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002730 struct list_head *head = &conf->retry_list;
NeilBrowne1dfa0a2011-04-18 18:25:41 +10002731 struct blk_plug plug;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002732
2733 md_check_recovery(mddev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002734
NeilBrown95af5872015-08-14 11:26:17 +10002735 if (!list_empty_careful(&conf->bio_end_io_list) &&
Shaohua Li29530792016-12-08 15:48:19 -08002736 !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
NeilBrown95af5872015-08-14 11:26:17 +10002737 LIST_HEAD(tmp);
2738 spin_lock_irqsave(&conf->device_lock, flags);
Shaohua Li29530792016-12-08 15:48:19 -08002739 if (!test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
Shaohua Li23ddba82016-03-14 11:49:32 -07002740 while (!list_empty(&conf->bio_end_io_list)) {
2741 list_move(conf->bio_end_io_list.prev, &tmp);
2742 conf->nr_queued--;
2743 }
NeilBrown95af5872015-08-14 11:26:17 +10002744 }
2745 spin_unlock_irqrestore(&conf->device_lock, flags);
2746 while (!list_empty(&tmp)) {
Mikulas Patockaa4527442015-10-01 15:17:43 -04002747 r10_bio = list_first_entry(&tmp, struct r10bio,
2748 retry_list);
NeilBrown95af5872015-08-14 11:26:17 +10002749 list_del(&r10_bio->retry_list);
NeilBrownc3407022015-10-24 16:23:48 +11002750 if (mddev->degraded)
2751 set_bit(R10BIO_Degraded, &r10_bio->state);
2752
2753 if (test_bit(R10BIO_WriteError,
2754 &r10_bio->state))
2755 close_write(r10_bio);
NeilBrown95af5872015-08-14 11:26:17 +10002756 raid_end_bio_io(r10_bio);
2757 }
2758 }
2759
NeilBrowne1dfa0a2011-04-18 18:25:41 +10002760 blk_start_plug(&plug);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002761 for (;;) {
NeilBrowna35e63e2008-03-04 14:29:29 -08002762
NeilBrown0021b7b2012-07-31 09:08:14 +02002763 flush_pending_writes(conf);
NeilBrowna35e63e2008-03-04 14:29:29 -08002764
Linus Torvalds1da177e2005-04-16 15:20:36 -07002765 spin_lock_irqsave(&conf->device_lock, flags);
NeilBrowna35e63e2008-03-04 14:29:29 -08002766 if (list_empty(head)) {
NeilBrown6cce3b22006-01-06 00:20:16 -08002767 spin_unlock_irqrestore(&conf->device_lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002768 break;
NeilBrowna35e63e2008-03-04 14:29:29 -08002769 }
NeilBrown9f2c9d12011-10-11 16:48:43 +11002770 r10_bio = list_entry(head->prev, struct r10bio, retry_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002771 list_del(head->prev);
NeilBrown4443ae12006-01-06 00:20:28 -08002772 conf->nr_queued--;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002773 spin_unlock_irqrestore(&conf->device_lock, flags);
2774
2775 mddev = r10_bio->mddev;
NeilBrown070ec552009-06-16 16:54:21 +10002776 conf = mddev->private;
NeilBrownbd870a12011-07-28 11:39:24 +10002777 if (test_bit(R10BIO_MadeGood, &r10_bio->state) ||
2778 test_bit(R10BIO_WriteError, &r10_bio->state))
NeilBrown749c55e2011-07-28 11:39:24 +10002779 handle_write_completed(conf, r10_bio);
NeilBrown3ea7daa2012-05-22 13:53:47 +10002780 else if (test_bit(R10BIO_IsReshape, &r10_bio->state))
2781 reshape_request_write(mddev, r10_bio);
NeilBrown749c55e2011-07-28 11:39:24 +10002782 else if (test_bit(R10BIO_IsSync, &r10_bio->state))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002783 sync_request_write(mddev, r10_bio);
Jens Axboe7eaceac2011-03-10 08:52:07 +01002784 else if (test_bit(R10BIO_IsRecover, &r10_bio->state))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002785 recovery_request_write(mddev, r10_bio);
NeilBrown856e08e2011-07-28 11:39:23 +10002786 else if (test_bit(R10BIO_ReadError, &r10_bio->state))
NeilBrown560f8e52011-07-28 11:39:23 +10002787 handle_read_error(mddev, r10_bio);
NeilBrownfc9977d2017-04-05 14:05:51 +10002788 else
2789 WARN_ON_ONCE(1);
NeilBrown4443ae12006-01-06 00:20:28 -08002790
NeilBrown1d9d5242009-10-16 15:55:32 +11002791 cond_resched();
Shaohua Li29530792016-12-08 15:48:19 -08002792 if (mddev->sb_flags & ~(1<<MD_SB_CHANGE_PENDING))
NeilBrownde393cd2011-07-28 11:31:48 +10002793 md_check_recovery(mddev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002794 }
NeilBrowne1dfa0a2011-04-18 18:25:41 +10002795 blk_finish_plug(&plug);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002796}
2797
NeilBrowne879a872011-10-11 16:49:02 +11002798static int init_resync(struct r10conf *conf)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002799{
Kent Overstreetafeee512018-05-20 18:25:52 -04002800 int ret, buffs, i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002801
2802 buffs = RESYNC_WINDOW / RESYNC_BLOCK_SIZE;
Kent Overstreetafeee512018-05-20 18:25:52 -04002803 BUG_ON(mempool_initialized(&conf->r10buf_pool));
NeilBrown69335ef2011-12-23 10:17:54 +11002804 conf->have_replacement = 0;
NeilBrown5cf00fc2012-05-21 09:28:20 +10002805 for (i = 0; i < conf->geo.raid_disks; i++)
NeilBrown69335ef2011-12-23 10:17:54 +11002806 if (conf->mirrors[i].replacement)
2807 conf->have_replacement = 1;
Kent Overstreetafeee512018-05-20 18:25:52 -04002808 ret = mempool_init(&conf->r10buf_pool, buffs,
2809 r10buf_pool_alloc, r10buf_pool_free, conf);
2810 if (ret)
2811 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002812 conf->next_resync = 0;
2813 return 0;
2814}
2815
Shaohua Li208410b2017-08-24 17:50:40 -07002816static struct r10bio *raid10_alloc_init_r10buf(struct r10conf *conf)
2817{
Kent Overstreetafeee512018-05-20 18:25:52 -04002818 struct r10bio *r10bio = mempool_alloc(&conf->r10buf_pool, GFP_NOIO);
Shaohua Li208410b2017-08-24 17:50:40 -07002819 struct resync_pages *rp;
2820 struct bio *bio;
2821 int nalloc;
2822 int i;
2823
2824 if (test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery) ||
2825 test_bit(MD_RECOVERY_RESHAPE, &conf->mddev->recovery))
2826 nalloc = conf->copies; /* resync */
2827 else
2828 nalloc = 2; /* recovery */
2829
2830 for (i = 0; i < nalloc; i++) {
2831 bio = r10bio->devs[i].bio;
2832 rp = bio->bi_private;
2833 bio_reset(bio);
2834 bio->bi_private = rp;
2835 bio = r10bio->devs[i].repl_bio;
2836 if (bio) {
2837 rp = bio->bi_private;
2838 bio_reset(bio);
2839 bio->bi_private = rp;
2840 }
2841 }
2842 return r10bio;
2843}
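/*
 * Note on the loop above: bio_reset() clears ->bi_private, and the
 * resync_pages bookkeeping for each bio lives there, so the pointer is
 * saved and restored around every reset.
 */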
2844
Linus Torvalds1da177e2005-04-16 15:20:36 -07002845/*
Guoqing Jiang8db87912017-10-24 15:11:52 +08002846 * Set cluster_sync_high since we need other nodes to add the
2847 * range [cluster_sync_low, cluster_sync_high] to the suspend list.
2848 */
2849static void raid10_set_cluster_sync_high(struct r10conf *conf)
2850{
2851 sector_t window_size;
2852 int extra_chunk, chunks;
2853
2854 /*
2855 * First, here we define a "stripe" as a unit that spans
2856 * all member devices once, so we get the number of chunks
2857 * by using raid_disks / near_copies. Otherwise, if near_copies is
2858 * close to raid_disks, the resync window could increase
2859 * linearly with the increase of raid_disks, which means
2860 * we would suspend a really large IO window while it is not
2861 * necessary. If raid_disks is not divisible by near_copies,
2862 * an extra chunk is needed to ensure the whole "stripe" is
2863 * covered.
2864 */
2865
2866 chunks = conf->geo.raid_disks / conf->geo.near_copies;
2867 if (conf->geo.raid_disks % conf->geo.near_copies == 0)
2868 extra_chunk = 0;
2869 else
2870 extra_chunk = 1;
2871 window_size = (chunks + extra_chunk) * conf->mddev->chunk_sectors;
2872
2873 /*
2874 * At least use a 32M window to align with raid1's resync window
2875 */
2876 window_size = (CLUSTER_RESYNC_WINDOW_SECTORS > window_size) ?
2877 CLUSTER_RESYNC_WINDOW_SECTORS : window_size;
2878
2879 conf->cluster_sync_high = conf->cluster_sync_low + window_size;
2880}
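/*
 * Illustrative sketch (kept under #if 0, not driver code) of the window
 * arithmetic above, using plain integers.  The 65536-sector floor is the
 * 32M minimum mentioned in the comment (32M of 512-byte sectors); the
 * function name is hypothetical.
 */
#if 0
static unsigned long long cluster_sync_window_sectors(int raid_disks,
						      int near_copies,
						      unsigned long long chunk_sectors)
{
	int chunks = raid_disks / near_copies;
	int extra_chunk = (raid_disks % near_copies) ? 1 : 0;
	unsigned long long window = (chunks + extra_chunk) * chunk_sectors;
	unsigned long long min_window = 32ULL * 1024 * 1024 / 512;

	return window < min_window ? min_window : window;
}
#endif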
2881
2882/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002883 * perform a "sync" on one "block"
2884 *
2885 * We need to make sure that no normal I/O request - particularly write
2886 * requests - conflict with active sync requests.
2887 *
2888 * This is achieved by tracking pending requests and a 'barrier' concept
2889 * that can be installed to exclude normal IO requests.
2890 *
2891 * Resync and recovery are handled very differently.
2892 * We differentiate by looking at MD_RECOVERY_SYNC in mddev->recovery.
2893 *
2894 * For resync, we iterate over virtual addresses, read all copies,
2895 * and update if there are differences. If only one copy is live,
2896 * skip it.
2897 * For recovery, we iterate over physical addresses, read a good
2898 * value for each non-in_sync drive, and over-write.
2899 *
2900 * So, for recovery we may have several outstanding complex requests for a
2901 * given address, one for each out-of-sync device. We model this by allocating
2902 * a number of r10_bio structures, one for each out-of-sync device.
2903 * As we set up these structures, we collect all bios together into a list
2904 * which we then process collectively to add pages, and then process again
2905 * to pass to generic_make_request.
2906 *
2907 * The r10_bio structures are linked using a borrowed master_bio pointer.
2908 * This link is counted in ->remaining. When the r10_bio that points to NULL
2909 * has its remaining count decremented to 0, the whole complex operation
2910 * is complete.
2911 *
2912 */
2913
Shaohua Li849674e2016-01-20 13:52:20 -08002914static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
NeilBrown09314792015-02-19 16:04:40 +11002915 int *skipped)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002916{
NeilBrowne879a872011-10-11 16:49:02 +11002917 struct r10conf *conf = mddev->private;
NeilBrown9f2c9d12011-10-11 16:48:43 +11002918 struct r10bio *r10_bio;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002919 struct bio *biolist = NULL, *bio;
2920 sector_t max_sector, nr_sectors;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002921 int i;
NeilBrown6cce3b22006-01-06 00:20:16 -08002922 int max_sync;
NeilBrown57dab0b2010-10-19 10:03:39 +11002923 sector_t sync_blocks;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002924 sector_t sectors_skipped = 0;
2925 int chunks_skipped = 0;
NeilBrown5cf00fc2012-05-21 09:28:20 +10002926 sector_t chunk_mask = conf->geo.chunk_mask;
Ming Lei022e5102017-07-14 16:14:42 +08002927 int page_idx = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002928
Kent Overstreetafeee512018-05-20 18:25:52 -04002929 if (!mempool_initialized(&conf->r10buf_pool))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002930 if (init_resync(conf))
NeilBrown57afd892005-06-21 17:17:13 -07002931 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002932
Martin Wilck7e83ccb2013-04-24 11:42:42 +10002933 /*
2934 * Allow skipping a full rebuild for incremental assembly
2935 * of a clean array, like RAID1 does.
2936 */
2937 if (mddev->bitmap == NULL &&
2938 mddev->recovery_cp == MaxSector &&
NeilBrown13765122013-07-04 16:41:53 +10002939 mddev->reshape_position == MaxSector &&
2940 !test_bit(MD_RECOVERY_SYNC, &mddev->recovery) &&
Martin Wilck7e83ccb2013-04-24 11:42:42 +10002941 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
NeilBrown13765122013-07-04 16:41:53 +10002942 !test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
Martin Wilck7e83ccb2013-04-24 11:42:42 +10002943 conf->fullsync == 0) {
2944 *skipped = 1;
NeilBrown13765122013-07-04 16:41:53 +10002945 return mddev->dev_sectors - sector_nr;
Martin Wilck7e83ccb2013-04-24 11:42:42 +10002946 }
2947
Linus Torvalds1da177e2005-04-16 15:20:36 -07002948 skipped:
Andre Noll58c0fed2009-03-31 14:33:13 +11002949 max_sector = mddev->dev_sectors;
NeilBrown3ea7daa2012-05-22 13:53:47 +10002950 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
2951 test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002952 max_sector = mddev->resync_max_sectors;
2953 if (sector_nr >= max_sector) {
Guoqing Jiang8db87912017-10-24 15:11:52 +08002954 conf->cluster_sync_low = 0;
2955 conf->cluster_sync_high = 0;
2956
NeilBrown6cce3b22006-01-06 00:20:16 -08002957 /* If we aborted, we need to abort the
2958 * sync on the 'current' bitmap chunks (there can
2959 * be several when recovering multiple devices),
2960 * as we may have started syncing them but not finished.
2961 * We can find the current address in
2962 * mddev->curr_resync, but for recovery,
2963 * we need to convert that to several
2964 * virtual addresses.
2965 */
NeilBrown3ea7daa2012-05-22 13:53:47 +10002966 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) {
2967 end_reshape(conf);
NeilBrownb3968552014-08-18 13:59:50 +10002968 close_sync(conf);
NeilBrown3ea7daa2012-05-22 13:53:47 +10002969 return 0;
2970 }
2971
NeilBrown6cce3b22006-01-06 00:20:16 -08002972 if (mddev->curr_resync < max_sector) { /* aborted */
2973 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07002974 md_bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
2975 &sync_blocks, 1);
NeilBrown5cf00fc2012-05-21 09:28:20 +10002976 else for (i = 0; i < conf->geo.raid_disks; i++) {
NeilBrown6cce3b22006-01-06 00:20:16 -08002977 sector_t sect =
2978 raid10_find_virt(conf, mddev->curr_resync, i);
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07002979 md_bitmap_end_sync(mddev->bitmap, sect,
2980 &sync_blocks, 1);
NeilBrown6cce3b22006-01-06 00:20:16 -08002981 }
NeilBrown9ad1aef2011-12-23 10:17:55 +11002982 } else {
2983 /* completed sync */
2984 if ((!mddev->bitmap || conf->fullsync)
2985 && conf->have_replacement
2986 && test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
2987 /* Completed a full sync so the replacements
2988 * are now fully recovered.
2989 */
NeilBrownf90145f2016-06-02 16:19:52 +10002990 rcu_read_lock();
2991 for (i = 0; i < conf->geo.raid_disks; i++) {
2992 struct md_rdev *rdev =
2993 rcu_dereference(conf->mirrors[i].replacement);
2994 if (rdev)
2995 rdev->recovery_offset = MaxSector;
2996 }
2997 rcu_read_unlock();
NeilBrown9ad1aef2011-12-23 10:17:55 +11002998 }
NeilBrown6cce3b22006-01-06 00:20:16 -08002999 conf->fullsync = 0;
NeilBrown9ad1aef2011-12-23 10:17:55 +11003000 }
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07003001 md_bitmap_close_sync(mddev->bitmap);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003002 close_sync(conf);
NeilBrown57afd892005-06-21 17:17:13 -07003003 *skipped = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003004 return sectors_skipped;
3005 }
NeilBrown3ea7daa2012-05-22 13:53:47 +10003006
3007 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
3008 return reshape_request(mddev, sector_nr, skipped);
3009
NeilBrown5cf00fc2012-05-21 09:28:20 +10003010 if (chunks_skipped >= conf->geo.raid_disks) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003011 /* if there has been nothing to do on any drive,
3012 * then there is nothing to do at all..
3013 */
NeilBrown57afd892005-06-21 17:17:13 -07003014 *skipped = 1;
3015 return (max_sector - sector_nr) + sectors_skipped;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003016 }
3017
NeilBrownc6207272008-02-06 01:39:52 -08003018 if (max_sector > mddev->resync_max)
3019 max_sector = mddev->resync_max; /* Don't do IO beyond here */
3020
Linus Torvalds1da177e2005-04-16 15:20:36 -07003021 /* make sure whole request will fit in a chunk - if chunks
3022 * are meaningful
3023 */
NeilBrown5cf00fc2012-05-21 09:28:20 +10003024 if (conf->geo.near_copies < conf->geo.raid_disks &&
3025 max_sector > (sector_nr | chunk_mask))
3026 max_sector = (sector_nr | chunk_mask) + 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003027
Tomasz Majchrzak7ac50442016-06-13 15:51:19 +02003028 /*
3029 * If there is non-resync activity waiting for a turn, then let it
3030 * through before starting on this new sync request.
3031 */
3032 if (conf->nr_waiting)
3033 schedule_timeout_uninterruptible(1);
3034
Linus Torvalds1da177e2005-04-16 15:20:36 -07003035 /* Again, very different code for resync and recovery.
3036 * Both must result in an r10bio with a list of bios that
Christoph Hellwig74d46992017-08-23 19:10:32 +02003037 * have bi_end_io, bi_sector, bi_disk set,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003038 * and bi_private set to the r10bio.
3039 * For recovery, we may actually create several r10bios
3040 * with 2 bios in each, that correspond to the bios in the main one.
3041 * In this case, the subordinate r10bios link back through a
3042 * borrowed master_bio pointer, and the counter in the master
3043 * includes a ref from each subordinate.
3044 */
3045 /* First, we decide what to do and set ->bi_end_io
3046 * To end_sync_read if we want to read, and
3047 * end_sync_write if we will want to write.
3048 */
3049
NeilBrown6cce3b22006-01-06 00:20:16 -08003050 max_sync = RESYNC_PAGES << (PAGE_SHIFT-9);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003051 if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
3052 /* recovery... the complicated one */
NeilBrowne875ece2011-07-28 11:39:24 +10003053 int j;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003054 r10_bio = NULL;
3055
NeilBrown5cf00fc2012-05-21 09:28:20 +10003056 for (i = 0 ; i < conf->geo.raid_disks; i++) {
NeilBrownab9d47e2011-05-11 14:54:41 +10003057 int still_degraded;
NeilBrown9f2c9d12011-10-11 16:48:43 +11003058 struct r10bio *rb2;
NeilBrownab9d47e2011-05-11 14:54:41 +10003059 sector_t sect;
3060 int must_sync;
NeilBrowne875ece2011-07-28 11:39:24 +10003061 int any_working;
Alex Wuee37d732018-09-21 16:05:03 +08003062 int need_recover = 0;
3063 int need_replace = 0;
Jonathan Brassowdc280d982012-07-31 10:03:52 +10003064 struct raid10_info *mirror = &conf->mirrors[i];
NeilBrownf90145f2016-06-02 16:19:52 +10003065 struct md_rdev *mrdev, *mreplace;
NeilBrownab9d47e2011-05-11 14:54:41 +10003066
NeilBrownf90145f2016-06-02 16:19:52 +10003067 rcu_read_lock();
3068 mrdev = rcu_dereference(mirror->rdev);
3069 mreplace = rcu_dereference(mirror->replacement);
3070
Alex Wuee37d732018-09-21 16:05:03 +08003071 if (mrdev != NULL &&
3072 !test_bit(Faulty, &mrdev->flags) &&
3073 !test_bit(In_sync, &mrdev->flags))
3074 need_recover = 1;
3075 if (mreplace != NULL &&
3076 !test_bit(Faulty, &mreplace->flags))
3077 need_replace = 1;
3078
3079 if (!need_recover && !need_replace) {
NeilBrownf90145f2016-06-02 16:19:52 +10003080 rcu_read_unlock();
NeilBrownab9d47e2011-05-11 14:54:41 +10003081 continue;
NeilBrownf90145f2016-06-02 16:19:52 +10003082 }
NeilBrownab9d47e2011-05-11 14:54:41 +10003083
3084 still_degraded = 0;
3085 /* want to reconstruct this device */
3086 rb2 = r10_bio;
3087 sect = raid10_find_virt(conf, sector_nr, i);
NeilBrownfc448a12012-07-03 10:37:30 +10003088 if (sect >= mddev->resync_max_sectors) {
3089 /* last stripe is not complete - don't
3090 * try to recover this sector.
3091 */
NeilBrownf90145f2016-06-02 16:19:52 +10003092 rcu_read_unlock();
NeilBrownfc448a12012-07-03 10:37:30 +10003093 continue;
3094 }
NeilBrownf5b67ae2016-06-02 16:19:53 +10003095 if (mreplace && test_bit(Faulty, &mreplace->flags))
3096 mreplace = NULL;
NeilBrown24afd802011-12-23 10:17:55 +11003097 /* Unless we are doing a full sync, or a replacement
3098 * we only need to recover the block if it is set in
3099 * the bitmap
NeilBrownab9d47e2011-05-11 14:54:41 +10003100 */
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07003101 must_sync = md_bitmap_start_sync(mddev->bitmap, sect,
3102 &sync_blocks, 1);
NeilBrownab9d47e2011-05-11 14:54:41 +10003103 if (sync_blocks < max_sync)
3104 max_sync = sync_blocks;
3105 if (!must_sync &&
NeilBrownf90145f2016-06-02 16:19:52 +10003106 mreplace == NULL &&
NeilBrownab9d47e2011-05-11 14:54:41 +10003107 !conf->fullsync) {
3108 /* yep, skip the sync_blocks here, but don't assume
3109 * that there will never be anything to do here
NeilBrown6cce3b22006-01-06 00:20:16 -08003110 */
NeilBrownab9d47e2011-05-11 14:54:41 +10003111 chunks_skipped = -1;
NeilBrownf90145f2016-06-02 16:19:52 +10003112 rcu_read_unlock();
NeilBrownab9d47e2011-05-11 14:54:41 +10003113 continue;
3114 }
NeilBrownf90145f2016-06-02 16:19:52 +10003115 atomic_inc(&mrdev->nr_pending);
3116 if (mreplace)
3117 atomic_inc(&mreplace->nr_pending);
3118 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003119
Shaohua Li208410b2017-08-24 17:50:40 -07003120 r10_bio = raid10_alloc_init_r10buf(conf);
NeilBrowncb8b12b2014-08-18 14:38:45 +10003121 r10_bio->state = 0;
NeilBrownab9d47e2011-05-11 14:54:41 +10003122 raise_barrier(conf, rb2 != NULL);
3123 atomic_set(&r10_bio->remaining, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003124
NeilBrownab9d47e2011-05-11 14:54:41 +10003125 r10_bio->master_bio = (struct bio*)rb2;
3126 if (rb2)
3127 atomic_inc(&rb2->remaining);
3128 r10_bio->mddev = mddev;
3129 set_bit(R10BIO_IsRecover, &r10_bio->state);
3130 r10_bio->sector = sect;
NeilBrown6cce3b22006-01-06 00:20:16 -08003131
NeilBrownab9d47e2011-05-11 14:54:41 +10003132 raid10_find_phys(conf, r10_bio);
NeilBrown18055562009-05-07 12:48:10 +10003133
NeilBrownab9d47e2011-05-11 14:54:41 +10003134 /* Need to check if the array will still be
3135 * degraded
3136 */
NeilBrownf90145f2016-06-02 16:19:52 +10003137 rcu_read_lock();
3138 for (j = 0; j < conf->geo.raid_disks; j++) {
3139 struct md_rdev *rdev = rcu_dereference(
3140 conf->mirrors[j].rdev);
3141 if (rdev == NULL || test_bit(Faulty, &rdev->flags)) {
NeilBrownab9d47e2011-05-11 14:54:41 +10003142 still_degraded = 1;
NeilBrown87fc7672005-09-09 16:24:04 -07003143 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003144 }
NeilBrownf90145f2016-06-02 16:19:52 +10003145 }
NeilBrownab9d47e2011-05-11 14:54:41 +10003146
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07003147 must_sync = md_bitmap_start_sync(mddev->bitmap, sect,
3148 &sync_blocks, still_degraded);
NeilBrownab9d47e2011-05-11 14:54:41 +10003149
NeilBrowne875ece2011-07-28 11:39:24 +10003150 any_working = 0;
NeilBrownab9d47e2011-05-11 14:54:41 +10003151 for (j=0; j<conf->copies;j++) {
NeilBrowne875ece2011-07-28 11:39:24 +10003152 int k;
NeilBrownab9d47e2011-05-11 14:54:41 +10003153 int d = r10_bio->devs[j].devnum;
NeilBrown5e570282011-07-28 11:39:25 +10003154 sector_t from_addr, to_addr;
NeilBrownf90145f2016-06-02 16:19:52 +10003155 struct md_rdev *rdev =
3156 rcu_dereference(conf->mirrors[d].rdev);
NeilBrown40c356c2011-07-28 11:39:24 +10003157 sector_t sector, first_bad;
3158 int bad_sectors;
NeilBrownf90145f2016-06-02 16:19:52 +10003159 if (!rdev ||
3160 !test_bit(In_sync, &rdev->flags))
NeilBrownab9d47e2011-05-11 14:54:41 +10003161 continue;
3162 /* This is where we read from */
NeilBrowne875ece2011-07-28 11:39:24 +10003163 any_working = 1;
NeilBrown40c356c2011-07-28 11:39:24 +10003164 sector = r10_bio->devs[j].addr;
3165
3166 if (is_badblock(rdev, sector, max_sync,
3167 &first_bad, &bad_sectors)) {
3168 if (first_bad > sector)
3169 max_sync = first_bad - sector;
3170 else {
3171 bad_sectors -= (sector
3172 - first_bad);
3173 if (max_sync > bad_sectors)
3174 max_sync = bad_sectors;
3175 continue;
3176 }
3177 }
NeilBrownab9d47e2011-05-11 14:54:41 +10003178 bio = r10_bio->devs[0].bio;
3179 bio->bi_next = biolist;
3180 biolist = bio;
NeilBrownab9d47e2011-05-11 14:54:41 +10003181 bio->bi_end_io = end_sync_read;
Mike Christie796a5cf2016-06-05 14:32:07 -05003182 bio_set_op_attrs(bio, REQ_OP_READ, 0);
NeilBrown8d3ca832016-11-18 16:16:12 +11003183 if (test_bit(FailFast, &rdev->flags))
3184 bio->bi_opf |= MD_FAILFAST;
NeilBrown5e570282011-07-28 11:39:25 +10003185 from_addr = r10_bio->devs[j].addr;
Kent Overstreet4f024f32013-10-11 15:44:27 -07003186 bio->bi_iter.bi_sector = from_addr +
3187 rdev->data_offset;
Christoph Hellwig74d46992017-08-23 19:10:32 +02003188 bio_set_dev(bio, rdev->bdev);
NeilBrown24afd802011-12-23 10:17:55 +11003189 atomic_inc(&rdev->nr_pending);
3190 /* and we write to 'i' (if not in_sync) */
NeilBrownab9d47e2011-05-11 14:54:41 +10003191
3192 for (k=0; k<conf->copies; k++)
3193 if (r10_bio->devs[k].devnum == i)
3194 break;
3195 BUG_ON(k == conf->copies);
NeilBrown5e570282011-07-28 11:39:25 +10003196 to_addr = r10_bio->devs[k].addr;
NeilBrownab9d47e2011-05-11 14:54:41 +10003197 r10_bio->devs[0].devnum = d;
NeilBrown5e570282011-07-28 11:39:25 +10003198 r10_bio->devs[0].addr = from_addr;
NeilBrownab9d47e2011-05-11 14:54:41 +10003199 r10_bio->devs[1].devnum = i;
NeilBrown5e570282011-07-28 11:39:25 +10003200 r10_bio->devs[1].addr = to_addr;
NeilBrownab9d47e2011-05-11 14:54:41 +10003201
Alex Wuee37d732018-09-21 16:05:03 +08003202 if (need_recover) {
NeilBrown24afd802011-12-23 10:17:55 +11003203 bio = r10_bio->devs[1].bio;
3204 bio->bi_next = biolist;
3205 biolist = bio;
NeilBrown24afd802011-12-23 10:17:55 +11003206 bio->bi_end_io = end_sync_write;
Mike Christie796a5cf2016-06-05 14:32:07 -05003207 bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
Kent Overstreet4f024f32013-10-11 15:44:27 -07003208 bio->bi_iter.bi_sector = to_addr
NeilBrownf90145f2016-06-02 16:19:52 +10003209 + mrdev->data_offset;
Christoph Hellwig74d46992017-08-23 19:10:32 +02003210 bio_set_dev(bio, mrdev->bdev);
NeilBrown24afd802011-12-23 10:17:55 +11003211 atomic_inc(&r10_bio->remaining);
3212 } else
3213 r10_bio->devs[1].bio->bi_end_io = NULL;
3214
3215 /* and maybe write to replacement */
3216 bio = r10_bio->devs[1].repl_bio;
3217 if (bio)
3218 bio->bi_end_io = NULL;
Alex Wuee37d732018-09-21 16:05:03 +08003219 /* Note: if need_replace, then bio
NeilBrown24afd802011-12-23 10:17:55 +11003220 * cannot be NULL as r10buf_pool_alloc will
3221 * have allocated it.
NeilBrown24afd802011-12-23 10:17:55 +11003222 */
Alex Wuee37d732018-09-21 16:05:03 +08003223 if (!need_replace)
NeilBrown24afd802011-12-23 10:17:55 +11003224 break;
3225 bio->bi_next = biolist;
3226 biolist = bio;
NeilBrown24afd802011-12-23 10:17:55 +11003227 bio->bi_end_io = end_sync_write;
Mike Christie796a5cf2016-06-05 14:32:07 -05003228 bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
Kent Overstreet4f024f32013-10-11 15:44:27 -07003229 bio->bi_iter.bi_sector = to_addr +
NeilBrownf90145f2016-06-02 16:19:52 +10003230 mreplace->data_offset;
Christoph Hellwig74d46992017-08-23 19:10:32 +02003231 bio_set_dev(bio, mreplace->bdev);
NeilBrown24afd802011-12-23 10:17:55 +11003232 atomic_inc(&r10_bio->remaining);
NeilBrownab9d47e2011-05-11 14:54:41 +10003233 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003234 }
NeilBrownf90145f2016-06-02 16:19:52 +10003235 rcu_read_unlock();
NeilBrownab9d47e2011-05-11 14:54:41 +10003236 if (j == conf->copies) {
NeilBrowne875ece2011-07-28 11:39:24 +10003237 /* Cannot recover, so abort the recovery or
3238 * record a bad block */
NeilBrowne875ece2011-07-28 11:39:24 +10003239 if (any_working) {
3240 /* problem is that there are bad blocks
3241 * on other device(s)
3242 */
3243 int k;
3244 for (k = 0; k < conf->copies; k++)
3245 if (r10_bio->devs[k].devnum == i)
3246 break;
NeilBrown24afd802011-12-23 10:17:55 +11003247 if (!test_bit(In_sync,
NeilBrownf90145f2016-06-02 16:19:52 +10003248 &mrdev->flags)
NeilBrown24afd802011-12-23 10:17:55 +11003249 && !rdev_set_badblocks(
NeilBrownf90145f2016-06-02 16:19:52 +10003250 mrdev,
NeilBrown24afd802011-12-23 10:17:55 +11003251 r10_bio->devs[k].addr,
3252 max_sync, 0))
3253 any_working = 0;
NeilBrownf90145f2016-06-02 16:19:52 +10003254 if (mreplace &&
NeilBrown24afd802011-12-23 10:17:55 +11003255 !rdev_set_badblocks(
NeilBrownf90145f2016-06-02 16:19:52 +10003256 mreplace,
NeilBrowne875ece2011-07-28 11:39:24 +10003257 r10_bio->devs[k].addr,
3258 max_sync, 0))
3259 any_working = 0;
3260 }
3261 if (!any_working) {
3262 if (!test_and_set_bit(MD_RECOVERY_INTR,
3263 &mddev->recovery))
NeilBrown08464e02016-11-02 14:16:50 +11003264 pr_warn("md/raid10:%s: insufficient working devices for recovery.\n",
NeilBrowne875ece2011-07-28 11:39:24 +10003265 mdname(mddev));
NeilBrown24afd802011-12-23 10:17:55 +11003266 mirror->recovery_disabled
NeilBrowne875ece2011-07-28 11:39:24 +10003267 = mddev->recovery_disabled;
3268 }
NeilBrowne8b84912014-01-06 10:35:34 +11003269 put_buf(r10_bio);
3270 if (rb2)
3271 atomic_dec(&rb2->remaining);
3272 r10_bio = rb2;
NeilBrownf90145f2016-06-02 16:19:52 +10003273 rdev_dec_pending(mrdev, mddev);
3274 if (mreplace)
3275 rdev_dec_pending(mreplace, mddev);
NeilBrownab9d47e2011-05-11 14:54:41 +10003276 break;
3277 }
NeilBrownf90145f2016-06-02 16:19:52 +10003278 rdev_dec_pending(mrdev, mddev);
3279 if (mreplace)
3280 rdev_dec_pending(mreplace, mddev);
NeilBrown8d3ca832016-11-18 16:16:12 +11003281 if (r10_bio->devs[0].bio->bi_opf & MD_FAILFAST) {
3282 /* Only want this if there is elsewhere to
3283 * read from. 'j' is currently the first
3284 * readable copy.
3285 */
3286 int targets = 1;
3287 for (; j < conf->copies; j++) {
3288 int d = r10_bio->devs[j].devnum;
3289 if (conf->mirrors[d].rdev &&
3290 test_bit(In_sync,
3291 &conf->mirrors[d].rdev->flags))
3292 targets++;
3293 }
3294 if (targets == 1)
3295 r10_bio->devs[0].bio->bi_opf
3296 &= ~MD_FAILFAST;
3297 }
NeilBrownab9d47e2011-05-11 14:54:41 +10003298 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003299 if (biolist == NULL) {
3300 while (r10_bio) {
NeilBrown9f2c9d12011-10-11 16:48:43 +11003301 struct r10bio *rb2 = r10_bio;
3302 r10_bio = (struct r10bio*) rb2->master_bio;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003303 rb2->master_bio = NULL;
3304 put_buf(rb2);
3305 }
3306 goto giveup;
3307 }
3308 } else {
3309 /* resync. Schedule a read for every block at this virt offset */
3310 int count = 0;
NeilBrown6cce3b22006-01-06 00:20:16 -08003311
Guoqing Jiang8db87912017-10-24 15:11:52 +08003312 /*
3313 * Since curr_resync_completed may not be updated in
3314 * time, and cluster_sync_low will be set based on it,
3315 * check against "sector_nr + 2 * RESYNC_SECTORS" as a
3316 * safety margin, which ensures curr_resync_completed is
3317 * updated in md_bitmap_cond_end_sync.
3318 */
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07003319 md_bitmap_cond_end_sync(mddev->bitmap, sector_nr,
3320 mddev_is_clustered(mddev) &&
3321 (sector_nr + 2 * RESYNC_SECTORS > conf->cluster_sync_high));
NeilBrown78200d42009-02-25 13:18:47 +11003322
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07003323 if (!md_bitmap_start_sync(mddev->bitmap, sector_nr,
3324 &sync_blocks, mddev->degraded) &&
NeilBrownab9d47e2011-05-11 14:54:41 +10003325 !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED,
3326 &mddev->recovery)) {
NeilBrown6cce3b22006-01-06 00:20:16 -08003327 /* We can skip this block */
3328 *skipped = 1;
3329 return sync_blocks + sectors_skipped;
3330 }
3331 if (sync_blocks < max_sync)
3332 max_sync = sync_blocks;
Shaohua Li208410b2017-08-24 17:50:40 -07003333 r10_bio = raid10_alloc_init_r10buf(conf);
NeilBrowncb8b12b2014-08-18 14:38:45 +10003334 r10_bio->state = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003335
Linus Torvalds1da177e2005-04-16 15:20:36 -07003336 r10_bio->mddev = mddev;
3337 atomic_set(&r10_bio->remaining, 0);
NeilBrown6cce3b22006-01-06 00:20:16 -08003338 raise_barrier(conf, 0);
3339 conf->next_resync = sector_nr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003340
3341 r10_bio->master_bio = NULL;
3342 r10_bio->sector = sector_nr;
3343 set_bit(R10BIO_IsSync, &r10_bio->state);
3344 raid10_find_phys(conf, r10_bio);
NeilBrown5cf00fc2012-05-21 09:28:20 +10003345 r10_bio->sectors = (sector_nr | chunk_mask) - sector_nr + 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003346
NeilBrown5cf00fc2012-05-21 09:28:20 +10003347 for (i = 0; i < conf->copies; i++) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003348 int d = r10_bio->devs[i].devnum;
NeilBrown40c356c2011-07-28 11:39:24 +10003349 sector_t first_bad, sector;
3350 int bad_sectors;
NeilBrownf90145f2016-06-02 16:19:52 +10003351 struct md_rdev *rdev;
NeilBrown40c356c2011-07-28 11:39:24 +10003352
NeilBrown9ad1aef2011-12-23 10:17:55 +11003353 if (r10_bio->devs[i].repl_bio)
3354 r10_bio->devs[i].repl_bio->bi_end_io = NULL;
3355
Linus Torvalds1da177e2005-04-16 15:20:36 -07003356 bio = r10_bio->devs[i].bio;
Christoph Hellwig4e4cbee2017-06-03 09:38:06 +02003357 bio->bi_status = BLK_STS_IOERR;
NeilBrownf90145f2016-06-02 16:19:52 +10003358 rcu_read_lock();
3359 rdev = rcu_dereference(conf->mirrors[d].rdev);
3360 if (rdev == NULL || test_bit(Faulty, &rdev->flags)) {
3361 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003362 continue;
NeilBrownf90145f2016-06-02 16:19:52 +10003363 }
NeilBrown40c356c2011-07-28 11:39:24 +10003364 sector = r10_bio->devs[i].addr;
NeilBrownf90145f2016-06-02 16:19:52 +10003365 if (is_badblock(rdev, sector, max_sync,
NeilBrown40c356c2011-07-28 11:39:24 +10003366 &first_bad, &bad_sectors)) {
3367 if (first_bad > sector)
3368 max_sync = first_bad - sector;
3369 else {
3370 bad_sectors -= (sector - first_bad);
3371 if (max_sync > bad_sectors)
Dan Carpenter91502f02012-10-11 14:20:58 +11003372 max_sync = bad_sectors;
NeilBrownf90145f2016-06-02 16:19:52 +10003373 rcu_read_unlock();
NeilBrown40c356c2011-07-28 11:39:24 +10003374 continue;
3375 }
3376 }
NeilBrownf90145f2016-06-02 16:19:52 +10003377 atomic_inc(&rdev->nr_pending);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003378 atomic_inc(&r10_bio->remaining);
3379 bio->bi_next = biolist;
3380 biolist = bio;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003381 bio->bi_end_io = end_sync_read;
Mike Christie796a5cf2016-06-05 14:32:07 -05003382 bio_set_op_attrs(bio, REQ_OP_READ, 0);
Guoqing Jiang1cdd1252017-06-13 11:16:08 +08003383 if (test_bit(FailFast, &rdev->flags))
NeilBrown8d3ca832016-11-18 16:16:12 +11003384 bio->bi_opf |= MD_FAILFAST;
NeilBrownf90145f2016-06-02 16:19:52 +10003385 bio->bi_iter.bi_sector = sector + rdev->data_offset;
Christoph Hellwig74d46992017-08-23 19:10:32 +02003386 bio_set_dev(bio, rdev->bdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003387 count++;
NeilBrown9ad1aef2011-12-23 10:17:55 +11003388
NeilBrownf90145f2016-06-02 16:19:52 +10003389 rdev = rcu_dereference(conf->mirrors[d].replacement);
3390 if (rdev == NULL || test_bit(Faulty, &rdev->flags)) {
3391 rcu_read_unlock();
NeilBrown9ad1aef2011-12-23 10:17:55 +11003392 continue;
NeilBrownf90145f2016-06-02 16:19:52 +10003393 }
3394 atomic_inc(&rdev->nr_pending);
NeilBrown9ad1aef2011-12-23 10:17:55 +11003395
3396 /* Need to set up for writing to the replacement */
3397 bio = r10_bio->devs[i].repl_bio;
Christoph Hellwig4e4cbee2017-06-03 09:38:06 +02003398 bio->bi_status = BLK_STS_IOERR;
NeilBrown9ad1aef2011-12-23 10:17:55 +11003399
3400 sector = r10_bio->devs[i].addr;
NeilBrown9ad1aef2011-12-23 10:17:55 +11003401 bio->bi_next = biolist;
3402 biolist = bio;
NeilBrown9ad1aef2011-12-23 10:17:55 +11003403 bio->bi_end_io = end_sync_write;
Mike Christie796a5cf2016-06-05 14:32:07 -05003404 bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
Guoqing Jiang1cdd1252017-06-13 11:16:08 +08003405 if (test_bit(FailFast, &rdev->flags))
NeilBrown1919cbb2016-11-18 16:16:12 +11003406 bio->bi_opf |= MD_FAILFAST;
NeilBrownf90145f2016-06-02 16:19:52 +10003407 bio->bi_iter.bi_sector = sector + rdev->data_offset;
Christoph Hellwig74d46992017-08-23 19:10:32 +02003408 bio_set_dev(bio, rdev->bdev);
NeilBrown9ad1aef2011-12-23 10:17:55 +11003409 count++;
Guoqing Jiang1cdd1252017-06-13 11:16:08 +08003410 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003411 }
3412
3413 if (count < 2) {
3414 for (i=0; i<conf->copies; i++) {
3415 int d = r10_bio->devs[i].devnum;
3416 if (r10_bio->devs[i].bio->bi_end_io)
NeilBrownab9d47e2011-05-11 14:54:41 +10003417 rdev_dec_pending(conf->mirrors[d].rdev,
3418 mddev);
NeilBrown9ad1aef2011-12-23 10:17:55 +11003419 if (r10_bio->devs[i].repl_bio &&
3420 r10_bio->devs[i].repl_bio->bi_end_io)
3421 rdev_dec_pending(
3422 conf->mirrors[d].replacement,
3423 mddev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003424 }
3425 put_buf(r10_bio);
3426 biolist = NULL;
3427 goto giveup;
3428 }
3429 }
3430
Linus Torvalds1da177e2005-04-16 15:20:36 -07003431 nr_sectors = 0;
NeilBrown6cce3b22006-01-06 00:20:16 -08003432 if (sector_nr + max_sync < max_sector)
3433 max_sector = sector_nr + max_sync;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003434 do {
3435 struct page *page;
3436 int len = PAGE_SIZE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003437 if (sector_nr + (len>>9) > max_sector)
3438 len = (max_sector - sector_nr) << 9;
3439 if (len == 0)
3440 break;
3441 for (bio= biolist ; bio ; bio=bio->bi_next) {
Ming Leif0250612017-03-17 00:12:33 +08003442 struct resync_pages *rp = get_resync_pages(bio);
Ming Lei022e5102017-07-14 16:14:42 +08003443 page = resync_fetch_page(rp, page_idx);
Ming Leic85ba142017-03-17 00:12:22 +08003444 /*
3445 * won't fail because the vec table is big enough
3446 * to hold all these pages
3447 */
3448 bio_add_page(bio, page, len, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003449 }
3450 nr_sectors += len>>9;
3451 sector_nr += len>>9;
Ming Lei022e5102017-07-14 16:14:42 +08003452 } while (++page_idx < RESYNC_PAGES);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003453 r10_bio->sectors = nr_sectors;
3454
Guoqing Jiang8db87912017-10-24 15:11:52 +08003455 if (mddev_is_clustered(mddev) &&
3456 test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
3457 /* It is resync not recovery */
3458 if (conf->cluster_sync_high < sector_nr + nr_sectors) {
3459 conf->cluster_sync_low = mddev->curr_resync_completed;
3460 raid10_set_cluster_sync_high(conf);
3461 /* Send resync message */
3462 md_cluster_ops->resync_info_update(mddev,
3463 conf->cluster_sync_low,
3464 conf->cluster_sync_high);
3465 }
3466 } else if (mddev_is_clustered(mddev)) {
3467 /* This is recovery not resync */
3468 sector_t sect_va1, sect_va2;
3469 bool broadcast_msg = false;
3470
3471 for (i = 0; i < conf->geo.raid_disks; i++) {
3472 /*
3473 * sector_nr is a device address for recovery, so we
3474 * need to translate it to an array address before comparing
3475 * with cluster_sync_high.
3476 */
3477 sect_va1 = raid10_find_virt(conf, sector_nr, i);
3478
3479 if (conf->cluster_sync_high < sect_va1 + nr_sectors) {
3480 broadcast_msg = true;
3481 /*
3482 * curr_resync_completed is similar to
3483 * sector_nr, so do the same translation.
3484 */
3485 sect_va2 = raid10_find_virt(conf,
3486 mddev->curr_resync_completed, i);
3487
3488 if (conf->cluster_sync_low == 0 ||
3489 conf->cluster_sync_low > sect_va2)
3490 conf->cluster_sync_low = sect_va2;
3491 }
3492 }
3493 if (broadcast_msg) {
3494 raid10_set_cluster_sync_high(conf);
3495 md_cluster_ops->resync_info_update(mddev,
3496 conf->cluster_sync_low,
3497 conf->cluster_sync_high);
3498 }
3499 }
3500
Linus Torvalds1da177e2005-04-16 15:20:36 -07003501 while (biolist) {
3502 bio = biolist;
3503 biolist = biolist->bi_next;
3504
3505 bio->bi_next = NULL;
Ming Leif0250612017-03-17 00:12:33 +08003506 r10_bio = get_resync_r10bio(bio);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003507 r10_bio->sectors = nr_sectors;
3508
3509 if (bio->bi_end_io == end_sync_read) {
Christoph Hellwig74d46992017-08-23 19:10:32 +02003510 md_sync_acct_bio(bio, nr_sectors);
Christoph Hellwig4e4cbee2017-06-03 09:38:06 +02003511 bio->bi_status = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003512 generic_make_request(bio);
3513 }
3514 }
3515
NeilBrown57afd892005-06-21 17:17:13 -07003516 if (sectors_skipped)
3517 /* pretend they weren't skipped, it makes
3518 * no important difference in this case
3519 */
3520 md_done_sync(mddev, sectors_skipped, 1);
3521
Linus Torvalds1da177e2005-04-16 15:20:36 -07003522 return sectors_skipped + nr_sectors;
3523 giveup:
3524 /* There is nowhere to write, so all non-sync
NeilBrowne875ece2011-07-28 11:39:24 +10003525 * drives must be failed or in resync, all drives
3526 * have a bad block, so try the next chunk...
Linus Torvalds1da177e2005-04-16 15:20:36 -07003527 */
NeilBrown09b40682009-02-25 13:18:47 +11003528 if (sector_nr + max_sync < max_sector)
3529 max_sector = sector_nr + max_sync;
3530
3531 sectors_skipped += (max_sector - sector_nr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003532 chunks_skipped ++;
3533 sector_nr = max_sector;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003534 goto skipped;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003535}
3536
Dan Williams80c3a6c2009-03-17 18:10:40 -07003537static sector_t
NeilBrownfd01b882011-10-11 16:47:53 +11003538raid10_size(struct mddev *mddev, sector_t sectors, int raid_disks)
Dan Williams80c3a6c2009-03-17 18:10:40 -07003539{
3540 sector_t size;
NeilBrowne879a872011-10-11 16:49:02 +11003541 struct r10conf *conf = mddev->private;
Dan Williams80c3a6c2009-03-17 18:10:40 -07003542
3543 if (!raid_disks)
NeilBrown3ea7daa2012-05-22 13:53:47 +10003544 raid_disks = min(conf->geo.raid_disks,
3545 conf->prev.raid_disks);
Dan Williams80c3a6c2009-03-17 18:10:40 -07003546 if (!sectors)
Trela, Maciejdab8b292010-03-08 16:02:45 +11003547 sectors = conf->dev_sectors;
Dan Williams80c3a6c2009-03-17 18:10:40 -07003548
NeilBrown5cf00fc2012-05-21 09:28:20 +10003549 size = sectors >> conf->geo.chunk_shift;
3550 sector_div(size, conf->geo.far_copies);
Dan Williams80c3a6c2009-03-17 18:10:40 -07003551 size = size * raid_disks;
NeilBrown5cf00fc2012-05-21 09:28:20 +10003552 sector_div(size, conf->geo.near_copies);
Dan Williams80c3a6c2009-03-17 18:10:40 -07003553
NeilBrown5cf00fc2012-05-21 09:28:20 +10003554 return size << conf->geo.chunk_shift;
Dan Williams80c3a6c2009-03-17 18:10:40 -07003555}
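/*
 * A worked example of the raid10_size() arithmetic above, using assumed
 * numbers rather than values from a real array: 4 devices, near_copies=2,
 * far_copies=1, 512KiB chunks (chunk_shift=10, i.e. 1024 sectors) and
 * sectors=1048576 per device.
 *
 *	1048576 >> 10             = 1024 chunks per device
 *	sector_div(size, 1)       = 1024
 *	* raid_disks (4)          = 4096 chunk copies in the array
 *	sector_div(size, 2)       = 2048 distinct data chunks
 *	return 2048 << 10         = 2097152 sectors (1 GiB) of array capacity
 */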
3556
NeilBrown6508fdb2012-05-17 10:08:45 +10003557static void calc_sectors(struct r10conf *conf, sector_t size)
3558{
3559 /* Calculate the number of sectors-per-device that will
3560 * actually be used, and set conf->dev_sectors and
3561 * conf->stride
3562 */
3563
NeilBrown5cf00fc2012-05-21 09:28:20 +10003564 size = size >> conf->geo.chunk_shift;
3565 sector_div(size, conf->geo.far_copies);
3566 size = size * conf->geo.raid_disks;
3567 sector_div(size, conf->geo.near_copies);
NeilBrown6508fdb2012-05-17 10:08:45 +10003568 /* 'size' is now the number of chunks in the array */
3569 /* calculate "used chunks per device" */
3570 size = size * conf->copies;
3571
3572 /* We need to round up when dividing by raid_disks to
3573 * get the stride size.
3574 */
NeilBrown5cf00fc2012-05-21 09:28:20 +10003575 size = DIV_ROUND_UP_SECTOR_T(size, conf->geo.raid_disks);
NeilBrown6508fdb2012-05-17 10:08:45 +10003576
NeilBrown5cf00fc2012-05-21 09:28:20 +10003577 conf->dev_sectors = size << conf->geo.chunk_shift;
NeilBrown6508fdb2012-05-17 10:08:45 +10003578
NeilBrown5cf00fc2012-05-21 09:28:20 +10003579 if (conf->geo.far_offset)
3580 conf->geo.stride = 1 << conf->geo.chunk_shift;
NeilBrown6508fdb2012-05-17 10:08:45 +10003581 else {
NeilBrown5cf00fc2012-05-21 09:28:20 +10003582 sector_div(size, conf->geo.far_copies);
3583 conf->geo.stride = size << conf->geo.chunk_shift;
NeilBrown6508fdb2012-05-17 10:08:45 +10003584 }
3585}
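/*
 * A worked example of calc_sectors(), again with assumed numbers: 4 devices,
 * near_copies=1, far_copies=2 (so copies=2), far_offset=0, 512KiB chunks
 * (chunk_shift=10) and size=8192 sectors per device from the superblock.
 *
 *	8192 >> 10                = 8 chunks per device
 *	/ far_copies (2)          = 4
 *	* raid_disks (4)          = 16
 *	/ near_copies (1)         = 16 data chunks in the array
 *	* copies (2)              = 32 chunk copies
 *	DIV_ROUND_UP(32, 4)       = 8 chunks used per device
 *	dev_sectors               = 8 << 10 = 8192 sectors
 *	stride (far_offset == 0)  = (8 / 2) << 10 = 4096 sectors,
 *	                            i.e. each far copy occupies half the device.
 */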
Trela, Maciejdab8b292010-03-08 16:02:45 +11003586
NeilBrowndeb200d2012-05-21 09:28:33 +10003587enum geo_type {geo_new, geo_old, geo_start};
3588static int setup_geo(struct geom *geo, struct mddev *mddev, enum geo_type new)
3589{
3590 int nc, fc, fo;
3591 int layout, chunk, disks;
3592 switch (new) {
3593 case geo_old:
3594 layout = mddev->layout;
3595 chunk = mddev->chunk_sectors;
3596 disks = mddev->raid_disks - mddev->delta_disks;
3597 break;
3598 case geo_new:
3599 layout = mddev->new_layout;
3600 chunk = mddev->new_chunk_sectors;
3601 disks = mddev->raid_disks;
3602 break;
3603 default: /* avoid 'may be unused' warnings */
3604 case geo_start: /* new when starting reshape - raid_disks not
3605 * updated yet. */
3606 layout = mddev->new_layout;
3607 chunk = mddev->new_chunk_sectors;
3608 disks = mddev->raid_disks + mddev->delta_disks;
3609 break;
3610 }
NeilBrown8bce6d32015-10-22 13:20:15 +11003611 if (layout >> 19)
NeilBrowndeb200d2012-05-21 09:28:33 +10003612 return -1;
3613 if (chunk < (PAGE_SIZE >> 9) ||
3614 !is_power_of_2(chunk))
3615 return -2;
3616 nc = layout & 255;
3617 fc = (layout >> 8) & 255;
3618 fo = layout & (1<<16);
3619 geo->raid_disks = disks;
3620 geo->near_copies = nc;
3621 geo->far_copies = fc;
3622 geo->far_offset = fo;
NeilBrown8bce6d32015-10-22 13:20:15 +11003623 switch (layout >> 17) {
3624 case 0: /* original layout. simple but not always optimal */
3625 geo->far_set_size = disks;
3626 break;
3627 case 1: /* "improved" layout which was buggy. Hopefully no-one is
3628		 * actually using this, but leave code here just in case. */
3629 geo->far_set_size = disks/fc;
3630 WARN(geo->far_set_size < fc,
3631 "This RAID10 layout does not provide data safety - please backup and create new array\n");
3632 break;
3633 case 2: /* "improved" layout fixed to match documentation */
3634 geo->far_set_size = fc * nc;
3635 break;
3636 default: /* Not a valid layout */
3637 return -1;
3638 }
NeilBrowndeb200d2012-05-21 09:28:33 +10003639 geo->chunk_mask = chunk - 1;
3640 geo->chunk_shift = ffz(~chunk);
3641 return nc*fc;
3642}
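/*
 * Example of decoding a layout word with setup_geo(), using an assumed
 * value rather than one read from a superblock: layout 0x50201 on a
 * 4-device array with 512KiB (1024-sector) chunks.
 *
 *	nc = 0x50201 & 255         = 1
 *	fc = (0x50201 >> 8) & 255  = 2
 *	fo = 0x50201 & (1 << 16)   = set	(offset layout)
 *	layout >> 17               = 2		(use_far_sets, bugfixed)
 *	far_set_size               = fc * nc = 2
 *	chunk_mask                 = 1023, chunk_shift = 10
 *	return value               = nc * fc = 2 copies
 */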
3643
NeilBrowne879a872011-10-11 16:49:02 +11003644static struct r10conf *setup_conf(struct mddev *mddev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003645{
NeilBrowne879a872011-10-11 16:49:02 +11003646 struct r10conf *conf = NULL;
Trela, Maciejdab8b292010-03-08 16:02:45 +11003647 int err = -EINVAL;
NeilBrowndeb200d2012-05-21 09:28:33 +10003648 struct geom geo;
3649 int copies;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003650
NeilBrowndeb200d2012-05-21 09:28:33 +10003651 copies = setup_geo(&geo, mddev, geo_new);
3652
3653 if (copies == -2) {
NeilBrown08464e02016-11-02 14:16:50 +11003654 pr_warn("md/raid10:%s: chunk size must be at least PAGE_SIZE(%ld) and be a power of 2.\n",
3655 mdname(mddev), PAGE_SIZE);
Trela, Maciejdab8b292010-03-08 16:02:45 +11003656 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003657 }
NeilBrown2604b702006-01-06 00:20:36 -08003658
NeilBrowndeb200d2012-05-21 09:28:33 +10003659 if (copies < 2 || copies > mddev->raid_disks) {
NeilBrown08464e02016-11-02 14:16:50 +11003660 pr_warn("md/raid10:%s: unsupported raid10 layout: 0x%8x\n",
3661 mdname(mddev), mddev->new_layout);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003662 goto out;
3663 }
Trela, Maciejdab8b292010-03-08 16:02:45 +11003664
3665 err = -ENOMEM;
NeilBrowne879a872011-10-11 16:49:02 +11003666 conf = kzalloc(sizeof(struct r10conf), GFP_KERNEL);
Trela, Maciejdab8b292010-03-08 16:02:45 +11003667 if (!conf)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003668 goto out;
Trela, Maciejdab8b292010-03-08 16:02:45 +11003669
NeilBrown3ea7daa2012-05-22 13:53:47 +10003670 /* FIXME calc properly */
Kees Cook6396bb22018-06-12 14:03:40 -07003671 conf->mirrors = kcalloc(mddev->raid_disks + max(0, -mddev->delta_disks),
3672 sizeof(struct raid10_info),
Trela, Maciejdab8b292010-03-08 16:02:45 +11003673 GFP_KERNEL);
3674 if (!conf->mirrors)
3675 goto out;
NeilBrown4443ae12006-01-06 00:20:28 -08003676
3677 conf->tmppage = alloc_page(GFP_KERNEL);
3678 if (!conf->tmppage)
Trela, Maciejdab8b292010-03-08 16:02:45 +11003679 goto out;
3680
NeilBrowndeb200d2012-05-21 09:28:33 +10003681 conf->geo = geo;
3682 conf->copies = copies;
Kent Overstreetafeee512018-05-20 18:25:52 -04003683 err = mempool_init(&conf->r10bio_pool, NR_RAID10_BIOS, r10bio_pool_alloc,
3684 r10bio_pool_free, conf);
3685 if (err)
Trela, Maciejdab8b292010-03-08 16:02:45 +11003686 goto out;
3687
Kent Overstreetafeee512018-05-20 18:25:52 -04003688 err = bioset_init(&conf->bio_split, BIO_POOL_SIZE, 0, 0);
3689 if (err)
NeilBrownfc9977d2017-04-05 14:05:51 +10003690 goto out;
3691
NeilBrown6508fdb2012-05-17 10:08:45 +10003692 calc_sectors(conf, mddev->dev_sectors);
NeilBrown3ea7daa2012-05-22 13:53:47 +10003693 if (mddev->reshape_position == MaxSector) {
3694 conf->prev = conf->geo;
3695 conf->reshape_progress = MaxSector;
3696 } else {
3697 if (setup_geo(&conf->prev, mddev, geo_old) != conf->copies) {
3698 err = -EINVAL;
3699 goto out;
3700 }
3701 conf->reshape_progress = mddev->reshape_position;
3702 if (conf->prev.far_offset)
3703 conf->prev.stride = 1 << conf->prev.chunk_shift;
3704 else
3705 /* far_copies must be 1 */
3706 conf->prev.stride = conf->dev_sectors;
3707 }
NeilBrown299b0682015-07-06 17:37:49 +10003708 conf->reshape_safe = conf->reshape_progress;
Neil Browne7e72bf2008-05-14 16:05:54 -07003709 spin_lock_init(&conf->device_lock);
Trela, Maciejdab8b292010-03-08 16:02:45 +11003710 INIT_LIST_HEAD(&conf->retry_list);
NeilBrown95af5872015-08-14 11:26:17 +10003711 INIT_LIST_HEAD(&conf->bio_end_io_list);
Trela, Maciejdab8b292010-03-08 16:02:45 +11003712
3713 spin_lock_init(&conf->resync_lock);
3714 init_waitqueue_head(&conf->wait_barrier);
Tomasz Majchrzak0e5313e2016-06-24 14:20:16 +02003715 atomic_set(&conf->nr_pending, 0);
Trela, Maciejdab8b292010-03-08 16:02:45 +11003716
Kent Overstreetafeee512018-05-20 18:25:52 -04003717 err = -ENOMEM;
NeilBrown02326052012-07-03 15:56:52 +10003718 conf->thread = md_register_thread(raid10d, mddev, "raid10");
Trela, Maciejdab8b292010-03-08 16:02:45 +11003719 if (!conf->thread)
3720 goto out;
3721
Trela, Maciejdab8b292010-03-08 16:02:45 +11003722 conf->mddev = mddev;
3723 return conf;
3724
3725 out:
Trela, Maciejdab8b292010-03-08 16:02:45 +11003726 if (conf) {
Kent Overstreetafeee512018-05-20 18:25:52 -04003727 mempool_exit(&conf->r10bio_pool);
Trela, Maciejdab8b292010-03-08 16:02:45 +11003728 kfree(conf->mirrors);
3729 safe_put_page(conf->tmppage);
Kent Overstreetafeee512018-05-20 18:25:52 -04003730 bioset_exit(&conf->bio_split);
Trela, Maciejdab8b292010-03-08 16:02:45 +11003731 kfree(conf);
3732 }
3733 return ERR_PTR(err);
3734}
3735
Shaohua Li849674e2016-01-20 13:52:20 -08003736static int raid10_run(struct mddev *mddev)
Trela, Maciejdab8b292010-03-08 16:02:45 +11003737{
NeilBrowne879a872011-10-11 16:49:02 +11003738 struct r10conf *conf;
Trela, Maciejdab8b292010-03-08 16:02:45 +11003739 int i, disk_idx, chunk_size;
Jonathan Brassowdc280d982012-07-31 10:03:52 +10003740 struct raid10_info *disk;
NeilBrown3cb03002011-10-11 16:45:26 +11003741 struct md_rdev *rdev;
Trela, Maciejdab8b292010-03-08 16:02:45 +11003742 sector_t size;
NeilBrown3ea7daa2012-05-22 13:53:47 +10003743 sector_t min_offset_diff = 0;
3744 int first = 1;
Shaohua Li532a2a32012-10-11 13:30:52 +11003745 bool discard_supported = false;
Trela, Maciejdab8b292010-03-08 16:02:45 +11003746
NeilBrowna415c0f2017-06-05 16:05:13 +10003747 if (mddev_init_writes_pending(mddev) < 0)
3748 return -ENOMEM;
3749
Trela, Maciejdab8b292010-03-08 16:02:45 +11003750 if (mddev->private == NULL) {
3751 conf = setup_conf(mddev);
3752 if (IS_ERR(conf))
3753 return PTR_ERR(conf);
3754 mddev->private = conf;
3755 }
3756 conf = mddev->private;
3757 if (!conf)
3758 goto out;
3759
Guoqing Jiang8db87912017-10-24 15:11:52 +08003760 if (mddev_is_clustered(conf->mddev)) {
3761 int fc, fo;
3762
3763 fc = (mddev->layout >> 8) & 255;
3764 fo = mddev->layout & (1<<16);
3765 if (fc > 1 || fo > 0) {
3766 pr_err("only near layout is supported by clustered"
3767 " raid10\n");
Lidong Zhong43a52122018-01-23 23:06:12 +08003768 goto out_free_conf;
Guoqing Jiang8db87912017-10-24 15:11:52 +08003769 }
3770 }
3771
Trela, Maciejdab8b292010-03-08 16:02:45 +11003772 mddev->thread = conf->thread;
3773 conf->thread = NULL;
3774
Martin K. Petersen8f6c2e42009-07-01 11:13:45 +10003775 chunk_size = mddev->chunk_sectors << 9;
Jonathan Brassowcc4d1ef2012-07-31 10:03:53 +10003776 if (mddev->queue) {
Shaohua Li532a2a32012-10-11 13:30:52 +11003777 blk_queue_max_discard_sectors(mddev->queue,
3778 mddev->chunk_sectors);
H. Peter Anvin5026d7a2013-06-12 07:37:43 -07003779 blk_queue_max_write_same_sectors(mddev->queue, 0);
Christoph Hellwig3deff1a2017-04-05 19:21:03 +02003780 blk_queue_max_write_zeroes_sectors(mddev->queue, 0);
Jonathan Brassowcc4d1ef2012-07-31 10:03:53 +10003781 blk_queue_io_min(mddev->queue, chunk_size);
3782 if (conf->geo.raid_disks % conf->geo.near_copies)
3783 blk_queue_io_opt(mddev->queue, chunk_size * conf->geo.raid_disks);
3784 else
3785 blk_queue_io_opt(mddev->queue, chunk_size *
3786 (conf->geo.raid_disks / conf->geo.near_copies));
3787 }
Martin K. Petersen8f6c2e42009-07-01 11:13:45 +10003788
NeilBrowndafb20f2012-03-19 12:46:39 +11003789 rdev_for_each(rdev, mddev) {
NeilBrown3ea7daa2012-05-22 13:53:47 +10003790 long long diff;
NeilBrown34b343c2011-07-28 11:31:47 +10003791
Linus Torvalds1da177e2005-04-16 15:20:36 -07003792 disk_idx = rdev->raid_disk;
NeilBrownf8c9e742012-05-21 09:28:33 +10003793 if (disk_idx < 0)
3794 continue;
3795 if (disk_idx >= conf->geo.raid_disks &&
3796 disk_idx >= conf->prev.raid_disks)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003797 continue;
3798 disk = conf->mirrors + disk_idx;
3799
NeilBrown56a2559b2011-12-23 10:17:55 +11003800 if (test_bit(Replacement, &rdev->flags)) {
3801 if (disk->replacement)
3802 goto out_free_conf;
3803 disk->replacement = rdev;
3804 } else {
3805 if (disk->rdev)
3806 goto out_free_conf;
3807 disk->rdev = rdev;
3808 }
NeilBrown3ea7daa2012-05-22 13:53:47 +10003809 diff = (rdev->new_data_offset - rdev->data_offset);
3810 if (!mddev->reshape_backwards)
3811 diff = -diff;
3812 if (diff < 0)
3813 diff = 0;
3814 if (first || diff < min_offset_diff)
3815 min_offset_diff = diff;
NeilBrown56a2559b2011-12-23 10:17:55 +11003816
Jonathan Brassowcc4d1ef2012-07-31 10:03:53 +10003817 if (mddev->gendisk)
3818 disk_stack_limits(mddev->gendisk, rdev->bdev,
3819 rdev->data_offset << 9);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003820
3821 disk->head_position = 0;
Shaohua Li532a2a32012-10-11 13:30:52 +11003822
3823 if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
3824 discard_supported = true;
Guoqing Jiang6f287ca2017-04-06 09:12:18 +08003825 first = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003826 }
NeilBrown3ea7daa2012-05-22 13:53:47 +10003827
Jonathan Brassowed30be02012-10-31 11:42:30 +11003828 if (mddev->queue) {
3829 if (discard_supported)
Bart Van Assche8b904b52018-03-07 17:10:10 -08003830 blk_queue_flag_set(QUEUE_FLAG_DISCARD,
Jonathan Brassowed30be02012-10-31 11:42:30 +11003831 mddev->queue);
3832 else
Bart Van Assche8b904b52018-03-07 17:10:10 -08003833 blk_queue_flag_clear(QUEUE_FLAG_DISCARD,
Jonathan Brassowed30be02012-10-31 11:42:30 +11003834 mddev->queue);
3835 }
NeilBrown6d508242005-09-09 16:24:03 -07003836 /* need to check that every block has at least one working mirror */
NeilBrown700c7212011-07-27 11:00:36 +10003837 if (!enough(conf, -1)) {
NeilBrown08464e02016-11-02 14:16:50 +11003838 pr_err("md/raid10:%s: not enough operational mirrors.\n",
NeilBrown6d508242005-09-09 16:24:03 -07003839 mdname(mddev));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003840 goto out_free_conf;
3841 }
3842
NeilBrown3ea7daa2012-05-22 13:53:47 +10003843 if (conf->reshape_progress != MaxSector) {
3844 /* must ensure that shape change is supported */
3845 if (conf->geo.far_copies != 1 &&
3846 conf->geo.far_offset == 0)
3847 goto out_free_conf;
3848 if (conf->prev.far_copies != 1 &&
NeilBrown78eaa0d2013-07-02 15:58:05 +10003849 conf->prev.far_offset == 0)
NeilBrown3ea7daa2012-05-22 13:53:47 +10003850 goto out_free_conf;
3851 }
3852
Linus Torvalds1da177e2005-04-16 15:20:36 -07003853 mddev->degraded = 0;
NeilBrownf8c9e742012-05-21 09:28:33 +10003854 for (i = 0;
3855 i < conf->geo.raid_disks
3856 || i < conf->prev.raid_disks;
3857 i++) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003858
3859 disk = conf->mirrors + i;
3860
NeilBrown56a2559b2011-12-23 10:17:55 +11003861 if (!disk->rdev && disk->replacement) {
3862 /* The replacement is all we have - use it */
3863 disk->rdev = disk->replacement;
3864 disk->replacement = NULL;
3865 clear_bit(Replacement, &disk->rdev->flags);
3866 }
3867
NeilBrown5fd6c1d2006-06-26 00:27:40 -07003868 if (!disk->rdev ||
NeilBrown2e333e82006-10-21 10:24:07 -07003869 !test_bit(In_sync, &disk->rdev->flags)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003870 disk->head_position = 0;
3871 mddev->degraded++;
NeilBrown0b59bb62014-01-14 16:30:10 +11003872 if (disk->rdev &&
3873 disk->rdev->saved_raid_disk < 0)
Neil Brown8c2e8702008-06-28 08:30:52 +10003874 conf->fullsync = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003875 }
BingJing Changbda31532018-06-28 18:40:11 +08003876
3877 if (disk->replacement &&
3878 !test_bit(In_sync, &disk->replacement->flags) &&
3879 disk->replacement->saved_raid_disk < 0) {
3880 conf->fullsync = 1;
3881 }
3882
NeilBrownd890fa22011-10-26 11:54:39 +11003883 disk->recovery_disabled = mddev->recovery_disabled - 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003884 }
3885
Andre Noll8c6ac8682009-06-18 08:48:06 +10003886 if (mddev->recovery_cp != MaxSector)
NeilBrown08464e02016-11-02 14:16:50 +11003887 pr_notice("md/raid10:%s: not clean -- starting background reconstruction\n",
3888 mdname(mddev));
3889 pr_info("md/raid10:%s: active with %d out of %d devices\n",
NeilBrown5cf00fc2012-05-21 09:28:20 +10003890 mdname(mddev), conf->geo.raid_disks - mddev->degraded,
3891 conf->geo.raid_disks);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003892 /*
3893 * Ok, everything is just fine now
3894 */
Trela, Maciejdab8b292010-03-08 16:02:45 +11003895 mddev->dev_sectors = conf->dev_sectors;
3896 size = raid10_size(mddev, 0, 0);
3897 md_set_array_sectors(mddev, size);
3898 mddev->resync_max_sectors = size;
NeilBrown46533ff2016-11-18 16:16:11 +11003899 set_bit(MD_FAILFAST_SUPPORTED, &mddev->flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003900
Jonathan Brassowcc4d1ef2012-07-31 10:03:53 +10003901 if (mddev->queue) {
NeilBrown5cf00fc2012-05-21 09:28:20 +10003902 int stripe = conf->geo.raid_disks *
Andre Noll9d8f0362009-06-18 08:45:01 +10003903 ((mddev->chunk_sectors << 9) / PAGE_SIZE);
Jonathan Brassowcc4d1ef2012-07-31 10:03:53 +10003904
3905 /* Calculate max read-ahead size.
3906		 * We need to read ahead by at least twice a whole stripe,
3907		 * maybe more.
3908 */
NeilBrown5cf00fc2012-05-21 09:28:20 +10003909 stripe /= conf->geo.near_copies;
Jan Karadc3b17c2017-02-02 15:56:50 +01003910 if (mddev->queue->backing_dev_info->ra_pages < 2 * stripe)
3911 mddev->queue->backing_dev_info->ra_pages = 2 * stripe;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003912 }
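	/*
	 * For example (assumed numbers): 6 devices, 512KiB chunks and 4KiB
	 * pages give stripe = 6 * (524288 / 4096) = 768 pages; with
	 * near_copies = 2 that becomes 384, so read-ahead is raised to at
	 * least 2 * 384 = 768 pages (3 MiB) if it is currently smaller.
	 */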
3913
Martin K. Petersena91a2782011-03-17 11:11:05 +01003914 if (md_integrity_register(mddev))
3915 goto out_free_conf;
3916
NeilBrown3ea7daa2012-05-22 13:53:47 +10003917 if (conf->reshape_progress != MaxSector) {
3918 unsigned long before_length, after_length;
3919
3920 before_length = ((1 << conf->prev.chunk_shift) *
3921 conf->prev.far_copies);
3922 after_length = ((1 << conf->geo.chunk_shift) *
3923 conf->geo.far_copies);
3924
3925 if (max(before_length, after_length) > min_offset_diff) {
3926 /* This cannot work */
NeilBrown08464e02016-11-02 14:16:50 +11003927 pr_warn("md/raid10: offset difference not enough to continue reshape\n");
NeilBrown3ea7daa2012-05-22 13:53:47 +10003928 goto out_free_conf;
3929 }
3930 conf->offset_diff = min_offset_diff;
3931
NeilBrown3ea7daa2012-05-22 13:53:47 +10003932 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
3933 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
3934 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
3935 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
3936 mddev->sync_thread = md_register_thread(md_do_sync, mddev,
3937 "reshape");
3938 }
3939
Linus Torvalds1da177e2005-04-16 15:20:36 -07003940 return 0;
3941
3942out_free_conf:
NeilBrown01f96c02011-09-21 15:30:20 +10003943 md_unregister_thread(&mddev->thread);
Kent Overstreetafeee512018-05-20 18:25:52 -04003944 mempool_exit(&conf->r10bio_pool);
NeilBrown1345b1d2006-01-06 00:20:40 -08003945 safe_put_page(conf->tmppage);
Jesper Juhl990a8ba2005-06-21 17:17:30 -07003946 kfree(conf->mirrors);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003947 kfree(conf);
3948 mddev->private = NULL;
3949out:
3950 return -EIO;
3951}
3952
NeilBrownafa0f552014-12-15 12:56:58 +11003953static void raid10_free(struct mddev *mddev, void *priv)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003954{
NeilBrownafa0f552014-12-15 12:56:58 +11003955 struct r10conf *conf = priv;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003956
Kent Overstreetafeee512018-05-20 18:25:52 -04003957 mempool_exit(&conf->r10bio_pool);
Hirokazu Takahashi0fea7ed2013-04-24 11:42:44 +10003958 safe_put_page(conf->tmppage);
Jesper Juhl990a8ba2005-06-21 17:17:30 -07003959 kfree(conf->mirrors);
NeilBrownc4796e22014-08-23 20:19:26 +10003960 kfree(conf->mirrors_old);
3961 kfree(conf->mirrors_new);
Kent Overstreetafeee512018-05-20 18:25:52 -04003962 bioset_exit(&conf->bio_split);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003963 kfree(conf);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003964}
3965
NeilBrownb03e0cc2017-10-19 12:49:15 +11003966static void raid10_quiesce(struct mddev *mddev, int quiesce)
NeilBrown6cce3b22006-01-06 00:20:16 -08003967{
NeilBrowne879a872011-10-11 16:49:02 +11003968 struct r10conf *conf = mddev->private;
NeilBrown6cce3b22006-01-06 00:20:16 -08003969
NeilBrownb03e0cc2017-10-19 12:49:15 +11003970 if (quiesce)
NeilBrown6cce3b22006-01-06 00:20:16 -08003971 raise_barrier(conf, 0);
NeilBrownb03e0cc2017-10-19 12:49:15 +11003972 else
NeilBrown6cce3b22006-01-06 00:20:16 -08003973 lower_barrier(conf);
NeilBrown6cce3b22006-01-06 00:20:16 -08003974}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003975
NeilBrown006a09a2012-03-19 12:46:40 +11003976static int raid10_resize(struct mddev *mddev, sector_t sectors)
3977{
3978 /* Resize of 'far' arrays is not supported.
3979 * For 'near' and 'offset' arrays we can set the
3980 * number of sectors used to be an appropriate multiple
3981 * of the chunk size.
3982 * For 'offset', this is far_copies*chunksize.
3983 * For 'near' the multiplier is the LCM of
3984 * near_copies and raid_disks.
3985 * So if far_copies > 1 && !far_offset, fail.
3986	 * Else find LCM(raid_disks, near_copies)*far_copies and
3987 * multiply by chunk_size. Then round to this number.
3988 * This is mostly done by raid10_size()
3989 */
3990 struct r10conf *conf = mddev->private;
3991 sector_t oldsize, size;
3992
NeilBrownf8c9e742012-05-21 09:28:33 +10003993 if (mddev->reshape_position != MaxSector)
3994 return -EBUSY;
3995
NeilBrown5cf00fc2012-05-21 09:28:20 +10003996 if (conf->geo.far_copies > 1 && !conf->geo.far_offset)
NeilBrown006a09a2012-03-19 12:46:40 +11003997 return -EINVAL;
3998
3999 oldsize = raid10_size(mddev, 0, 0);
4000 size = raid10_size(mddev, sectors, 0);
NeilBrowna4a61252012-05-22 13:55:27 +10004001 if (mddev->external_size &&
4002 mddev->array_sectors > size)
NeilBrown006a09a2012-03-19 12:46:40 +11004003 return -EINVAL;
NeilBrowna4a61252012-05-22 13:55:27 +10004004 if (mddev->bitmap) {
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07004005 int ret = md_bitmap_resize(mddev->bitmap, size, 0, 0);
NeilBrowna4a61252012-05-22 13:55:27 +10004006 if (ret)
4007 return ret;
4008 }
4009 md_set_array_sectors(mddev, size);
NeilBrown006a09a2012-03-19 12:46:40 +11004010 if (sectors > mddev->dev_sectors &&
4011 mddev->recovery_cp > oldsize) {
4012 mddev->recovery_cp = oldsize;
4013 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
4014 }
NeilBrown6508fdb2012-05-17 10:08:45 +10004015 calc_sectors(conf, sectors);
4016 mddev->dev_sectors = conf->dev_sectors;
NeilBrown006a09a2012-03-19 12:46:40 +11004017 mddev->resync_max_sectors = size;
4018 return 0;
4019}
4020
NeilBrown53a6ab42015-02-12 14:09:57 +11004021static void *raid10_takeover_raid0(struct mddev *mddev, sector_t size, int devs)
Trela, Maciejdab8b292010-03-08 16:02:45 +11004022{
NeilBrown3cb03002011-10-11 16:45:26 +11004023 struct md_rdev *rdev;
NeilBrowne879a872011-10-11 16:49:02 +11004024 struct r10conf *conf;
Trela, Maciejdab8b292010-03-08 16:02:45 +11004025
4026 if (mddev->degraded > 0) {
NeilBrown08464e02016-11-02 14:16:50 +11004027 pr_warn("md/raid10:%s: Error: degraded raid0!\n",
4028 mdname(mddev));
Trela, Maciejdab8b292010-03-08 16:02:45 +11004029 return ERR_PTR(-EINVAL);
4030 }
NeilBrown53a6ab42015-02-12 14:09:57 +11004031 sector_div(size, devs);
Trela, Maciejdab8b292010-03-08 16:02:45 +11004032
Trela, Maciejdab8b292010-03-08 16:02:45 +11004033 /* Set new parameters */
4034 mddev->new_level = 10;
4035 /* new layout: far_copies = 1, near_copies = 2 */
4036 mddev->new_layout = (1<<8) + 2;
4037 mddev->new_chunk_sectors = mddev->chunk_sectors;
4038 mddev->delta_disks = mddev->raid_disks;
Trela, Maciejdab8b292010-03-08 16:02:45 +11004039 mddev->raid_disks *= 2;
4040 /* make sure it will be not marked as dirty */
4041 mddev->recovery_cp = MaxSector;
NeilBrown53a6ab42015-02-12 14:09:57 +11004042 mddev->dev_sectors = size;
Trela, Maciejdab8b292010-03-08 16:02:45 +11004043
4044 conf = setup_conf(mddev);
Krzysztof Wojcik02214dc2011-02-04 14:18:26 +01004045 if (!IS_ERR(conf)) {
NeilBrowndafb20f2012-03-19 12:46:39 +11004046 rdev_for_each(rdev, mddev)
NeilBrown53a6ab42015-02-12 14:09:57 +11004047 if (rdev->raid_disk >= 0) {
NeilBrowne93f68a2010-06-15 09:36:03 +01004048 rdev->new_raid_disk = rdev->raid_disk * 2;
NeilBrown53a6ab42015-02-12 14:09:57 +11004049 rdev->sectors = size;
4050 }
Krzysztof Wojcik02214dc2011-02-04 14:18:26 +01004051 conf->barrier = 1;
4052 }
4053
Trela, Maciejdab8b292010-03-08 16:02:45 +11004054 return conf;
4055}
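/*
 * For instance (an assumed configuration, not a requirement): taking over a
 * single-zone raid0 of two devices yields a degraded four-device near-2
 * raid10 with layout (1<<8) + 2 = 0x102.  The original devices keep their
 * data in slots 0 and 2; slots 1 and 3 start out empty and are rebuilt as
 * mirrors of their partners once spare devices are added.
 */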
4056
NeilBrownfd01b882011-10-11 16:47:53 +11004057static void *raid10_takeover(struct mddev *mddev)
Trela, Maciejdab8b292010-03-08 16:02:45 +11004058{
NeilBrowne373ab12011-10-11 16:48:59 +11004059 struct r0conf *raid0_conf;
Trela, Maciejdab8b292010-03-08 16:02:45 +11004060
4061 /* raid10 can take over:
4062	 * raid0 - providing it has only one zone
4063 */
4064 if (mddev->level == 0) {
4065 /* for raid0 takeover only one zone is supported */
NeilBrowne373ab12011-10-11 16:48:59 +11004066 raid0_conf = mddev->private;
4067 if (raid0_conf->nr_strip_zones > 1) {
NeilBrown08464e02016-11-02 14:16:50 +11004068 pr_warn("md/raid10:%s: cannot takeover raid 0 with more than one zone.\n",
4069 mdname(mddev));
Trela, Maciejdab8b292010-03-08 16:02:45 +11004070 return ERR_PTR(-EINVAL);
4071 }
NeilBrown53a6ab42015-02-12 14:09:57 +11004072 return raid10_takeover_raid0(mddev,
4073 raid0_conf->strip_zone->zone_end,
4074 raid0_conf->strip_zone->nb_dev);
Trela, Maciejdab8b292010-03-08 16:02:45 +11004075 }
4076 return ERR_PTR(-EINVAL);
4077}
4078
NeilBrown3ea7daa2012-05-22 13:53:47 +10004079static int raid10_check_reshape(struct mddev *mddev)
4080{
4081 /* Called when there is a request to change
4082 * - layout (to ->new_layout)
4083 * - chunk size (to ->new_chunk_sectors)
4084 * - raid_disks (by delta_disks)
4085 * or when trying to restart a reshape that was ongoing.
4086 *
4087 * We need to validate the request and possibly allocate
4088 * space if that might be an issue later.
4089 *
4090 * Currently we reject any reshape of a 'far' mode array,
4091 * allow chunk size to change if new is generally acceptable,
4092 * allow raid_disks to increase, and allow
4093 * a switch between 'near' mode and 'offset' mode.
4094 */
4095 struct r10conf *conf = mddev->private;
4096 struct geom geo;
4097
4098 if (conf->geo.far_copies != 1 && !conf->geo.far_offset)
4099 return -EINVAL;
4100
4101 if (setup_geo(&geo, mddev, geo_start) != conf->copies)
4102 /* mustn't change number of copies */
4103 return -EINVAL;
4104 if (geo.far_copies > 1 && !geo.far_offset)
4105 /* Cannot switch to 'far' mode */
4106 return -EINVAL;
4107
4108 if (mddev->array_sectors & geo.chunk_mask)
4109 /* not factor of array size */
4110 return -EINVAL;
4111
NeilBrown3ea7daa2012-05-22 13:53:47 +10004112 if (!enough(conf, -1))
4113 return -EINVAL;
4114
4115 kfree(conf->mirrors_new);
4116 conf->mirrors_new = NULL;
4117 if (mddev->delta_disks > 0) {
4118 /* allocate new 'mirrors' list */
Kees Cook6396bb22018-06-12 14:03:40 -07004119 conf->mirrors_new =
4120 kcalloc(mddev->raid_disks + mddev->delta_disks,
4121 sizeof(struct raid10_info),
4122 GFP_KERNEL);
NeilBrown3ea7daa2012-05-22 13:53:47 +10004123 if (!conf->mirrors_new)
4124 return -ENOMEM;
4125 }
4126 return 0;
4127}
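/*
 * As an illustration of what the checks above permit (assumed geometries):
 * converting a near-2 array (layout 0x102) to an offset-2 layout while
 * adding devices is accepted, because both geometries have two copies;
 * converting it to plain far-2 without far_offset is rejected, as is any
 * change that would alter the number of copies.
 */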
4128
4129/*
4130 * Need to check if array has failed when deciding whether to:
4131 * - start an array
4132 * - remove non-faulty devices
4133 * - add a spare
4134 * - allow a reshape
4135 * This determination is simple when no reshape is happening.
4136 * However if there is a reshape, we need to carefully check
4137 * both the before and after sections.
4138 * This is because some failed devices may only affect one
4139 * of the two sections, and some non-in_sync devices may
4140 * be insync in the section most affected by failed devices.
4141 */
4142static int calc_degraded(struct r10conf *conf)
4143{
4144 int degraded, degraded2;
4145 int i;
4146
4147 rcu_read_lock();
4148 degraded = 0;
4149 /* 'prev' section first */
4150 for (i = 0; i < conf->prev.raid_disks; i++) {
4151 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
4152 if (!rdev || test_bit(Faulty, &rdev->flags))
4153 degraded++;
4154 else if (!test_bit(In_sync, &rdev->flags))
4155 /* When we can reduce the number of devices in
4156 * an array, this might not contribute to
4157 * 'degraded'. It does now.
4158 */
4159 degraded++;
4160 }
4161 rcu_read_unlock();
4162 if (conf->geo.raid_disks == conf->prev.raid_disks)
4163 return degraded;
4164 rcu_read_lock();
4165 degraded2 = 0;
4166 for (i = 0; i < conf->geo.raid_disks; i++) {
4167 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
4168 if (!rdev || test_bit(Faulty, &rdev->flags))
4169 degraded2++;
4170 else if (!test_bit(In_sync, &rdev->flags)) {
4171 /* If reshape is increasing the number of devices,
4172 * this section has already been recovered, so
4173 * it doesn't contribute to degraded.
4174 * else it does.
4175 */
4176 if (conf->geo.raid_disks <= conf->prev.raid_disks)
4177 degraded2++;
4178 }
4179 }
4180 rcu_read_unlock();
4181 if (degraded2 > degraded)
4182 return degraded2;
4183 return degraded;
4184}
4185
4186static int raid10_start_reshape(struct mddev *mddev)
4187{
4188 /* A 'reshape' has been requested. This commits
4189	 * the various 'new' fields and sets MD_RECOVERY_RESHAPE
4190 * This also checks if there are enough spares and adds them
4191 * to the array.
4192 * We currently require enough spares to make the final
4193 * array non-degraded. We also require that the difference
4194 * between old and new data_offset - on each device - is
4195 * enough that we never risk over-writing.
4196 */
4197
4198 unsigned long before_length, after_length;
4199 sector_t min_offset_diff = 0;
4200 int first = 1;
4201 struct geom new;
4202 struct r10conf *conf = mddev->private;
4203 struct md_rdev *rdev;
4204 int spares = 0;
NeilBrownbb63a702012-05-22 13:55:28 +10004205 int ret;
NeilBrown3ea7daa2012-05-22 13:53:47 +10004206
4207 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
4208 return -EBUSY;
4209
4210 if (setup_geo(&new, mddev, geo_start) != conf->copies)
4211 return -EINVAL;
4212
4213 before_length = ((1 << conf->prev.chunk_shift) *
4214 conf->prev.far_copies);
4215 after_length = ((1 << conf->geo.chunk_shift) *
4216 conf->geo.far_copies);
4217
4218 rdev_for_each(rdev, mddev) {
4219 if (!test_bit(In_sync, &rdev->flags)
4220 && !test_bit(Faulty, &rdev->flags))
4221 spares++;
4222 if (rdev->raid_disk >= 0) {
4223 long long diff = (rdev->new_data_offset
4224 - rdev->data_offset);
4225 if (!mddev->reshape_backwards)
4226 diff = -diff;
4227 if (diff < 0)
4228 diff = 0;
4229 if (first || diff < min_offset_diff)
4230 min_offset_diff = diff;
Shaohua Lib5063352017-05-01 12:15:07 -07004231 first = 0;
NeilBrown3ea7daa2012-05-22 13:53:47 +10004232 }
4233 }
4234
4235 if (max(before_length, after_length) > min_offset_diff)
4236 return -EINVAL;
4237
4238 if (spares < mddev->delta_disks)
4239 return -EINVAL;
4240
4241 conf->offset_diff = min_offset_diff;
4242 spin_lock_irq(&conf->device_lock);
4243 if (conf->mirrors_new) {
4244 memcpy(conf->mirrors_new, conf->mirrors,
Jonathan Brassowdc280d982012-07-31 10:03:52 +10004245 sizeof(struct raid10_info)*conf->prev.raid_disks);
NeilBrown3ea7daa2012-05-22 13:53:47 +10004246 smp_mb();
NeilBrownc4796e22014-08-23 20:19:26 +10004247 kfree(conf->mirrors_old);
NeilBrown3ea7daa2012-05-22 13:53:47 +10004248 conf->mirrors_old = conf->mirrors;
4249 conf->mirrors = conf->mirrors_new;
4250 conf->mirrors_new = NULL;
4251 }
4252 setup_geo(&conf->geo, mddev, geo_start);
4253 smp_mb();
4254 if (mddev->reshape_backwards) {
4255 sector_t size = raid10_size(mddev, 0, 0);
4256 if (size < mddev->array_sectors) {
4257 spin_unlock_irq(&conf->device_lock);
NeilBrown08464e02016-11-02 14:16:50 +11004258			pr_warn("md/raid10:%s: array size must be reduced before number of disks\n",
4259 mdname(mddev));
NeilBrown3ea7daa2012-05-22 13:53:47 +10004260 return -EINVAL;
4261 }
4262 mddev->resync_max_sectors = size;
4263 conf->reshape_progress = size;
4264 } else
4265 conf->reshape_progress = 0;
NeilBrown299b0682015-07-06 17:37:49 +10004266 conf->reshape_safe = conf->reshape_progress;
NeilBrown3ea7daa2012-05-22 13:53:47 +10004267 spin_unlock_irq(&conf->device_lock);
4268
NeilBrownbb63a702012-05-22 13:55:28 +10004269 if (mddev->delta_disks && mddev->bitmap) {
Guoqing Jiangafd75622018-10-18 16:37:41 +08004270 struct mdp_superblock_1 *sb = NULL;
4271 sector_t oldsize, newsize;
4272
4273 oldsize = raid10_size(mddev, 0, 0);
4274 newsize = raid10_size(mddev, 0, conf->geo.raid_disks);
4275
4276 if (!mddev_is_clustered(mddev)) {
4277 ret = md_bitmap_resize(mddev->bitmap, newsize, 0, 0);
4278 if (ret)
4279 goto abort;
4280 else
4281 goto out;
4282 }
4283
4284 rdev_for_each(rdev, mddev) {
4285 if (rdev->raid_disk > -1 &&
4286 !test_bit(Faulty, &rdev->flags))
4287 sb = page_address(rdev->sb_page);
4288 }
4289
4290 /*
4291		 * If some node is already performing the reshape, there is no
4292		 * need to call md_bitmap_resize again since it will be called
4293		 * when the BITMAP_RESIZE msg is received.
4294 */
4295 if ((sb && (le32_to_cpu(sb->feature_map) &
4296 MD_FEATURE_RESHAPE_ACTIVE)) || (oldsize == newsize))
4297 goto out;
4298
4299 ret = md_bitmap_resize(mddev->bitmap, newsize, 0, 0);
NeilBrownbb63a702012-05-22 13:55:28 +10004300 if (ret)
4301 goto abort;
Guoqing Jiangafd75622018-10-18 16:37:41 +08004302
4303 ret = md_cluster_ops->resize_bitmaps(mddev, newsize, oldsize);
4304 if (ret) {
4305 md_bitmap_resize(mddev->bitmap, oldsize, 0, 0);
4306 goto abort;
4307 }
NeilBrownbb63a702012-05-22 13:55:28 +10004308 }
Guoqing Jiangafd75622018-10-18 16:37:41 +08004309out:
NeilBrown3ea7daa2012-05-22 13:53:47 +10004310 if (mddev->delta_disks > 0) {
4311 rdev_for_each(rdev, mddev)
4312 if (rdev->raid_disk < 0 &&
4313 !test_bit(Faulty, &rdev->flags)) {
4314 if (raid10_add_disk(mddev, rdev) == 0) {
4315 if (rdev->raid_disk >=
4316 conf->prev.raid_disks)
4317 set_bit(In_sync, &rdev->flags);
4318 else
4319 rdev->recovery_offset = 0;
4320
4321 if (sysfs_link_rdev(mddev, rdev))
4322 /* Failure here is OK */;
4323 }
4324 } else if (rdev->raid_disk >= conf->prev.raid_disks
4325 && !test_bit(Faulty, &rdev->flags)) {
4326 /* This is a spare that was manually added */
4327 set_bit(In_sync, &rdev->flags);
4328 }
4329 }
4330 /* When a reshape changes the number of devices,
4331 * ->degraded is measured against the larger of the
4332 * pre and post numbers.
4333 */
4334 spin_lock_irq(&conf->device_lock);
4335 mddev->degraded = calc_degraded(conf);
4336 spin_unlock_irq(&conf->device_lock);
4337 mddev->raid_disks = conf->geo.raid_disks;
4338 mddev->reshape_position = conf->reshape_progress;
Shaohua Li29530792016-12-08 15:48:19 -08004339 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
NeilBrown3ea7daa2012-05-22 13:53:47 +10004340
4341 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
4342 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
NeilBrownea358cd2015-06-12 20:05:04 +10004343 clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
NeilBrown3ea7daa2012-05-22 13:53:47 +10004344 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
4345 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
4346
4347 mddev->sync_thread = md_register_thread(md_do_sync, mddev,
4348 "reshape");
4349 if (!mddev->sync_thread) {
NeilBrownbb63a702012-05-22 13:55:28 +10004350 ret = -EAGAIN;
4351 goto abort;
NeilBrown3ea7daa2012-05-22 13:53:47 +10004352 }
4353 conf->reshape_checkpoint = jiffies;
4354 md_wakeup_thread(mddev->sync_thread);
4355 md_new_event(mddev);
4356 return 0;
NeilBrownbb63a702012-05-22 13:55:28 +10004357
4358abort:
4359 mddev->recovery = 0;
4360 spin_lock_irq(&conf->device_lock);
4361 conf->geo = conf->prev;
4362 mddev->raid_disks = conf->geo.raid_disks;
4363 rdev_for_each(rdev, mddev)
4364 rdev->new_data_offset = rdev->data_offset;
4365 smp_wmb();
4366 conf->reshape_progress = MaxSector;
NeilBrown299b0682015-07-06 17:37:49 +10004367 conf->reshape_safe = MaxSector;
NeilBrownbb63a702012-05-22 13:55:28 +10004368 mddev->reshape_position = MaxSector;
4369 spin_unlock_irq(&conf->device_lock);
4370 return ret;
NeilBrown3ea7daa2012-05-22 13:53:47 +10004371}
4372
4373/* Calculate the last device-address that could contain
4374 * any block from the chunk that includes the array-address 's'
4375 * and report the next address.
4376 * i.e. the address returned will be chunk-aligned and after
4377 * any data that is in the chunk containing 's'.
4378 */
4379static sector_t last_dev_address(sector_t s, struct geom *geo)
4380{
4381 s = (s | geo->chunk_mask) + 1;
4382 s >>= geo->chunk_shift;
4383 s *= geo->near_copies;
4384 s = DIV_ROUND_UP_SECTOR_T(s, geo->raid_disks);
4385 s *= geo->far_copies;
4386 s <<= geo->chunk_shift;
4387 return s;
4388}
4389
4390/* Calculate the first device-address that could contain
4391 * any block from the chunk that includes the array-address 's'.
4392 * This too will be the start of a chunk
4393 */
4394static sector_t first_dev_address(sector_t s, struct geom *geo)
4395{
4396 s >>= geo->chunk_shift;
4397 s *= geo->near_copies;
4398 sector_div(s, geo->raid_disks);
4399 s *= geo->far_copies;
4400 s <<= geo->chunk_shift;
4401 return s;
4402}
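/*
 * A worked example for the two helpers above, with an assumed geometry of
 * 4 devices, near_copies=2, far_copies=1 and 1024-sector chunks, for the
 * array address s = 5000 (which lies in array chunk 4):
 *
 *	last_dev_address:  ((5000 | 1023) + 1) >> 10 = 5 array chunks,
 *	                   * 2 copies = 10, DIV_ROUND_UP(10, 4) = 3,
 *	                   * 1 far copy, << 10 = device sector 3072
 *	first_dev_address: (5000 >> 10) = 4, * 2 = 8, / 4 = 2,
 *	                   * 1, << 10 = device sector 2048
 *
 * so every block of that chunk lives between device offsets 2048 and 3072.
 */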
4403
4404static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
4405 int *skipped)
4406{
4407 /* We simply copy at most one chunk (smallest of old and new)
4408 * at a time, possibly less if that exceeds RESYNC_PAGES,
4409 * or we hit a bad block or something.
4410 * This might mean we pause for normal IO in the middle of
NeilBrown02ec5022015-07-06 16:33:47 +10004411 * a chunk, but that is not a problem as mddev->reshape_position
NeilBrown3ea7daa2012-05-22 13:53:47 +10004412 * can record any location.
4413 *
4414 * If we will want to write to a location that isn't
4415 * yet recorded as 'safe' (i.e. in metadata on disk) then
4416 * we need to flush all reshape requests and update the metadata.
4417 *
4418 * When reshaping forwards (e.g. to more devices), we interpret
4419 * 'safe' as the earliest block which might not have been copied
4420 * down yet. We divide this by previous stripe size and multiply
4421 * by previous stripe length to get lowest device offset that we
4422 * cannot write to yet.
4423 * We interpret 'sector_nr' as an address that we want to write to.
4424	 * From this we use last_dev_address() to find where we might
4425	 * write to, and first_dev_address() on the 'safe' position.
4426 * If this 'next' write position is after the 'safe' position,
4427 * we must update the metadata to increase the 'safe' position.
4428 *
4429 * When reshaping backwards, we round in the opposite direction
4430 * and perform the reverse test: next write position must not be
4431 * less than current safe position.
4432 *
4433 * In all this the minimum difference in data offsets
4434 * (conf->offset_diff - always positive) allows a bit of slack,
NeilBrown02ec5022015-07-06 16:33:47 +10004435 * so next can be after 'safe', but not by more than offset_diff
NeilBrown3ea7daa2012-05-22 13:53:47 +10004436 *
4437 * We need to prepare all the bios here before we start any IO
4438 * to ensure the size we choose is acceptable to all devices.
4439	 * That means one for each copy for write-out and an extra one for
4440 * read-in.
4441 * We store the read-in bio in ->master_bio and the others in
4442 * ->devs[x].bio and ->devs[x].repl_bio.
4443 */
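	/*
	 * A small numeric illustration of the forward case (all values
	 * assumed): if the chunk we are about to copy maps to device
	 * addresses up to 'next' = 40960 while 'safe' derived from
	 * reshape_safe is 36864 and offset_diff is 2048, then
	 * 40960 > 36864 + 2048, so the metadata must be flushed before
	 * any of these writes are issued.
	 */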
4444 struct r10conf *conf = mddev->private;
4445 struct r10bio *r10_bio;
4446 sector_t next, safe, last;
4447 int max_sectors;
4448 int nr_sectors;
4449 int s;
4450 struct md_rdev *rdev;
4451 int need_flush = 0;
4452 struct bio *blist;
4453 struct bio *bio, *read_bio;
4454 int sectors_done = 0;
Ming Leif0250612017-03-17 00:12:33 +08004455 struct page **pages;
NeilBrown3ea7daa2012-05-22 13:53:47 +10004456
4457 if (sector_nr == 0) {
4458 /* If restarting in the middle, skip the initial sectors */
4459 if (mddev->reshape_backwards &&
4460 conf->reshape_progress < raid10_size(mddev, 0, 0)) {
4461 sector_nr = (raid10_size(mddev, 0, 0)
4462 - conf->reshape_progress);
4463 } else if (!mddev->reshape_backwards &&
4464 conf->reshape_progress > 0)
4465 sector_nr = conf->reshape_progress;
4466 if (sector_nr) {
4467 mddev->curr_resync_completed = sector_nr;
4468 sysfs_notify(&mddev->kobj, NULL, "sync_completed");
4469 *skipped = 1;
4470 return sector_nr;
4471 }
4472 }
4473
4474 /* We don't use sector_nr to track where we are up to
4475 * as that doesn't work well for ->reshape_backwards.
4476 * So just use ->reshape_progress.
4477 */
4478 if (mddev->reshape_backwards) {
4479 /* 'next' is the earliest device address that we might
4480 * write to for this chunk in the new layout
4481 */
4482 next = first_dev_address(conf->reshape_progress - 1,
4483 &conf->geo);
4484
4485 /* 'safe' is the last device address that we might read from
4486 * in the old layout after a restart
4487 */
4488 safe = last_dev_address(conf->reshape_safe - 1,
4489 &conf->prev);
4490
4491 if (next + conf->offset_diff < safe)
4492 need_flush = 1;
4493
4494 last = conf->reshape_progress - 1;
4495 sector_nr = last & ~(sector_t)(conf->geo.chunk_mask
4496 & conf->prev.chunk_mask);
4497 if (sector_nr + RESYNC_BLOCK_SIZE/512 < last)
4498 sector_nr = last + 1 - RESYNC_BLOCK_SIZE/512;
4499 } else {
4500 /* 'next' is after the last device address that we
4501 * might write to for this chunk in the new layout
4502 */
4503 next = last_dev_address(conf->reshape_progress, &conf->geo);
4504
4505 /* 'safe' is the earliest device address that we might
4506 * read from in the old layout after a restart
4507 */
4508 safe = first_dev_address(conf->reshape_safe, &conf->prev);
4509
4510 /* Need to update metadata if 'next' might be beyond 'safe'
4511 * as that would possibly corrupt data
4512 */
4513 if (next > safe + conf->offset_diff)
4514 need_flush = 1;
4515
4516 sector_nr = conf->reshape_progress;
4517 last = sector_nr | (conf->geo.chunk_mask
4518 & conf->prev.chunk_mask);
4519
4520 if (sector_nr + RESYNC_BLOCK_SIZE/512 <= last)
4521 last = sector_nr + RESYNC_BLOCK_SIZE/512 - 1;
4522 }
4523
4524 if (need_flush ||
4525 time_after(jiffies, conf->reshape_checkpoint + 10*HZ)) {
4526 /* Need to update reshape_position in metadata */
4527 wait_barrier(conf);
4528 mddev->reshape_position = conf->reshape_progress;
4529 if (mddev->reshape_backwards)
4530 mddev->curr_resync_completed = raid10_size(mddev, 0, 0)
4531 - conf->reshape_progress;
4532 else
4533 mddev->curr_resync_completed = conf->reshape_progress;
4534 conf->reshape_checkpoint = jiffies;
Shaohua Li29530792016-12-08 15:48:19 -08004535 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
NeilBrown3ea7daa2012-05-22 13:53:47 +10004536 md_wakeup_thread(mddev->thread);
Shaohua Li29530792016-12-08 15:48:19 -08004537 wait_event(mddev->sb_wait, mddev->sb_flags == 0 ||
NeilBrownc91abf52013-11-19 12:02:01 +11004538 test_bit(MD_RECOVERY_INTR, &mddev->recovery));
4539 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
4540 allow_barrier(conf);
4541 return sectors_done;
4542 }
NeilBrown3ea7daa2012-05-22 13:53:47 +10004543 conf->reshape_safe = mddev->reshape_position;
4544 allow_barrier(conf);
4545 }
4546
Xiao Ni1d0ffd22018-08-30 15:57:09 +08004547 raise_barrier(conf, 0);
NeilBrown3ea7daa2012-05-22 13:53:47 +10004548read_more:
4549 /* Now schedule reads for blocks from sector_nr to last */
Shaohua Li208410b2017-08-24 17:50:40 -07004550 r10_bio = raid10_alloc_init_r10buf(conf);
NeilBrowncb8b12b2014-08-18 14:38:45 +10004551 r10_bio->state = 0;
Xiao Ni1d0ffd22018-08-30 15:57:09 +08004552 raise_barrier(conf, 1);
NeilBrown3ea7daa2012-05-22 13:53:47 +10004553 atomic_set(&r10_bio->remaining, 0);
4554 r10_bio->mddev = mddev;
4555 r10_bio->sector = sector_nr;
4556 set_bit(R10BIO_IsReshape, &r10_bio->state);
4557 r10_bio->sectors = last - sector_nr + 1;
4558 rdev = read_balance(conf, r10_bio, &max_sectors);
4559 BUG_ON(!test_bit(R10BIO_Previous, &r10_bio->state));
4560
4561 if (!rdev) {
4562 /* Cannot read from here, so need to record bad blocks
4563 * on all the target devices.
4564 */
4565 // FIXME
Kent Overstreetafeee512018-05-20 18:25:52 -04004566 mempool_free(r10_bio, &conf->r10buf_pool);
NeilBrown3ea7daa2012-05-22 13:53:47 +10004567 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
4568 return sectors_done;
4569 }
4570
4571 read_bio = bio_alloc_mddev(GFP_KERNEL, RESYNC_PAGES, mddev);
4572
Christoph Hellwig74d46992017-08-23 19:10:32 +02004573 bio_set_dev(read_bio, rdev->bdev);
Kent Overstreet4f024f32013-10-11 15:44:27 -07004574 read_bio->bi_iter.bi_sector = (r10_bio->devs[r10_bio->read_slot].addr
NeilBrown3ea7daa2012-05-22 13:53:47 +10004575 + rdev->data_offset);
4576 read_bio->bi_private = r10_bio;
Ming Lei81fa1522017-03-17 00:12:32 +08004577 read_bio->bi_end_io = end_reshape_read;
Mike Christie796a5cf2016-06-05 14:32:07 -05004578 bio_set_op_attrs(read_bio, REQ_OP_READ, 0);
NeilBrownce0b0a42014-08-18 13:56:38 +10004579 read_bio->bi_flags &= (~0UL << BIO_RESET_BITS);
Christoph Hellwig4e4cbee2017-06-03 09:38:06 +02004580 read_bio->bi_status = 0;
NeilBrown3ea7daa2012-05-22 13:53:47 +10004581 read_bio->bi_vcnt = 0;
Kent Overstreet4f024f32013-10-11 15:44:27 -07004582 read_bio->bi_iter.bi_size = 0;
NeilBrown3ea7daa2012-05-22 13:53:47 +10004583 r10_bio->master_bio = read_bio;
4584 r10_bio->read_slot = r10_bio->devs[r10_bio->read_slot].devnum;
4585
Guoqing Jiang7564bed2018-10-18 16:37:42 +08004586 /*
4587	 * Broadcast a RESYNC message to the other nodes so that they do not
4588	 * write to this region and cause conflicts.
4589 */
4590 if (mddev_is_clustered(mddev) && conf->cluster_sync_high <= sector_nr) {
4591 struct mdp_superblock_1 *sb = NULL;
4592 int sb_reshape_pos = 0;
4593
4594 conf->cluster_sync_low = sector_nr;
4595 conf->cluster_sync_high = sector_nr + CLUSTER_RESYNC_WINDOW_SECTORS;
4596 sb = page_address(rdev->sb_page);
4597 if (sb) {
4598 sb_reshape_pos = le64_to_cpu(sb->reshape_position);
4599 /*
4600			 * Set cluster_sync_low again if the next address for the array
4601			 * reshape is less than cluster_sync_low, since we can't update
4602			 * cluster_sync_low until that region has finished reshaping.
4603 */
4604 if (sb_reshape_pos < conf->cluster_sync_low)
4605 conf->cluster_sync_low = sb_reshape_pos;
4606 }
4607
4608 md_cluster_ops->resync_info_update(mddev, conf->cluster_sync_low,
4609 conf->cluster_sync_high);
4610 }
4611
NeilBrown3ea7daa2012-05-22 13:53:47 +10004612 /* Now find the locations in the new layout */
4613 __raid10_find_phys(&conf->geo, r10_bio);
4614
4615 blist = read_bio;
4616 read_bio->bi_next = NULL;
4617
NeilBrownd094d682016-06-02 16:19:52 +10004618 rcu_read_lock();
NeilBrown3ea7daa2012-05-22 13:53:47 +10004619 for (s = 0; s < conf->copies*2; s++) {
4620 struct bio *b;
4621 int d = r10_bio->devs[s/2].devnum;
4622 struct md_rdev *rdev2;
4623 if (s&1) {
NeilBrownd094d682016-06-02 16:19:52 +10004624 rdev2 = rcu_dereference(conf->mirrors[d].replacement);
NeilBrown3ea7daa2012-05-22 13:53:47 +10004625 b = r10_bio->devs[s/2].repl_bio;
4626 } else {
NeilBrownd094d682016-06-02 16:19:52 +10004627 rdev2 = rcu_dereference(conf->mirrors[d].rdev);
NeilBrown3ea7daa2012-05-22 13:53:47 +10004628 b = r10_bio->devs[s/2].bio;
4629 }
4630 if (!rdev2 || test_bit(Faulty, &rdev2->flags))
4631 continue;
Kent Overstreet8be185f2012-09-06 14:14:43 -07004632
Christoph Hellwig74d46992017-08-23 19:10:32 +02004633 bio_set_dev(b, rdev2->bdev);
Kent Overstreet4f024f32013-10-11 15:44:27 -07004634 b->bi_iter.bi_sector = r10_bio->devs[s/2].addr +
4635 rdev2->new_data_offset;
NeilBrown3ea7daa2012-05-22 13:53:47 +10004636 b->bi_end_io = end_reshape_write;
Mike Christie796a5cf2016-06-05 14:32:07 -05004637 bio_set_op_attrs(b, REQ_OP_WRITE, 0);
NeilBrown3ea7daa2012-05-22 13:53:47 +10004638 b->bi_next = blist;
NeilBrown3ea7daa2012-05-22 13:53:47 +10004639 blist = b;
4640 }
4641
4642 /* Now add as many pages as possible to all of these bios. */
4643
4644 nr_sectors = 0;
Ming Leif0250612017-03-17 00:12:33 +08004645 pages = get_resync_pages(r10_bio->devs[0].bio)->pages;
NeilBrown3ea7daa2012-05-22 13:53:47 +10004646 for (s = 0 ; s < max_sectors; s += PAGE_SIZE >> 9) {
Ming Leif0250612017-03-17 00:12:33 +08004647 struct page *page = pages[s / (PAGE_SIZE >> 9)];
NeilBrown3ea7daa2012-05-22 13:53:47 +10004648 int len = (max_sectors - s) << 9;
4649 if (len > PAGE_SIZE)
4650 len = PAGE_SIZE;
4651 for (bio = blist; bio ; bio = bio->bi_next) {
Ming Leic85ba142017-03-17 00:12:22 +08004652 /*
4653 * won't fail because the vec table is big enough
4654 * to hold all these pages
4655 */
4656 bio_add_page(bio, page, len, 0);
NeilBrown3ea7daa2012-05-22 13:53:47 +10004657 }
4658 sector_nr += len >> 9;
4659 nr_sectors += len >> 9;
4660 }
NeilBrownd094d682016-06-02 16:19:52 +10004661 rcu_read_unlock();
NeilBrown3ea7daa2012-05-22 13:53:47 +10004662 r10_bio->sectors = nr_sectors;
4663
4664 /* Now submit the read */
Christoph Hellwig74d46992017-08-23 19:10:32 +02004665 md_sync_acct_bio(read_bio, r10_bio->sectors);
NeilBrown3ea7daa2012-05-22 13:53:47 +10004666 atomic_inc(&r10_bio->remaining);
4667 read_bio->bi_next = NULL;
4668 generic_make_request(read_bio);
4669 sector_nr += nr_sectors;
4670 sectors_done += nr_sectors;
4671 if (sector_nr <= last)
4672 goto read_more;
4673
Xiao Ni1d0ffd22018-08-30 15:57:09 +08004674 lower_barrier(conf);
4675
NeilBrown3ea7daa2012-05-22 13:53:47 +10004676 /* Now that we have done the whole section we can
4677 * update reshape_progress
4678 */
4679 if (mddev->reshape_backwards)
4680 conf->reshape_progress -= sectors_done;
4681 else
4682 conf->reshape_progress += sectors_done;
4683
4684 return sectors_done;
4685}
4686
4687static void end_reshape_request(struct r10bio *r10_bio);
4688static int handle_reshape_read_error(struct mddev *mddev,
4689 struct r10bio *r10_bio);
4690static void reshape_request_write(struct mddev *mddev, struct r10bio *r10_bio)
4691{
4692 /* Reshape read completed. Hopefully we have a block
4693 * to write out.
4694 * If we got a read error then we do sync 1-page reads from
4695 * elsewhere until we find the data - or give up.
4696 */
4697 struct r10conf *conf = mddev->private;
4698 int s;
4699
4700 if (!test_bit(R10BIO_Uptodate, &r10_bio->state))
4701 if (handle_reshape_read_error(mddev, r10_bio) < 0) {
4702 /* Reshape has been aborted */
4703 md_done_sync(mddev, r10_bio->sectors, 0);
4704 return;
4705 }
4706
4707 /* We definitely have the data in the pages, schedule the
4708 * writes.
4709 */
4710 atomic_set(&r10_bio->remaining, 1);
4711 for (s = 0; s < conf->copies*2; s++) {
4712 struct bio *b;
4713 int d = r10_bio->devs[s/2].devnum;
4714 struct md_rdev *rdev;
NeilBrownd094d682016-06-02 16:19:52 +10004715 rcu_read_lock();
NeilBrown3ea7daa2012-05-22 13:53:47 +10004716 if (s&1) {
NeilBrownd094d682016-06-02 16:19:52 +10004717 rdev = rcu_dereference(conf->mirrors[d].replacement);
NeilBrown3ea7daa2012-05-22 13:53:47 +10004718 b = r10_bio->devs[s/2].repl_bio;
4719 } else {
NeilBrownd094d682016-06-02 16:19:52 +10004720 rdev = rcu_dereference(conf->mirrors[d].rdev);
NeilBrown3ea7daa2012-05-22 13:53:47 +10004721 b = r10_bio->devs[s/2].bio;
4722 }
NeilBrownd094d682016-06-02 16:19:52 +10004723 if (!rdev || test_bit(Faulty, &rdev->flags)) {
4724 rcu_read_unlock();
NeilBrown3ea7daa2012-05-22 13:53:47 +10004725 continue;
NeilBrownd094d682016-06-02 16:19:52 +10004726 }
NeilBrown3ea7daa2012-05-22 13:53:47 +10004727 atomic_inc(&rdev->nr_pending);
NeilBrownd094d682016-06-02 16:19:52 +10004728 rcu_read_unlock();
Christoph Hellwig74d46992017-08-23 19:10:32 +02004729 md_sync_acct_bio(b, r10_bio->sectors);
NeilBrown3ea7daa2012-05-22 13:53:47 +10004730 atomic_inc(&r10_bio->remaining);
4731 b->bi_next = NULL;
4732 generic_make_request(b);
4733 }
4734 end_reshape_request(r10_bio);
4735}
4736
4737static void end_reshape(struct r10conf *conf)
4738{
4739 if (test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery))
4740 return;
4741
4742 spin_lock_irq(&conf->device_lock);
4743 conf->prev = conf->geo;
4744 md_finish_reshape(conf->mddev);
4745 smp_wmb();
4746 conf->reshape_progress = MaxSector;
NeilBrown299b0682015-07-06 17:37:49 +10004747 conf->reshape_safe = MaxSector;
NeilBrown3ea7daa2012-05-22 13:53:47 +10004748 spin_unlock_irq(&conf->device_lock);
4749
4750 /* read-ahead size must cover two whole stripes, which is
4751	 * 2 * (number of data disks) * chunk size
4752 */
4753 if (conf->mddev->queue) {
4754 int stripe = conf->geo.raid_disks *
4755 ((conf->mddev->chunk_sectors << 9) / PAGE_SIZE);
4756 stripe /= conf->geo.near_copies;
Jan Karadc3b17c2017-02-02 15:56:50 +01004757 if (conf->mddev->queue->backing_dev_info->ra_pages < 2 * stripe)
4758 conf->mddev->queue->backing_dev_info->ra_pages = 2 * stripe;
NeilBrown3ea7daa2012-05-22 13:53:47 +10004759 }
4760 conf->fullsync = 0;
4761}
4762
Guoqing Jiang7564bed2018-10-18 16:37:42 +08004763static void raid10_update_reshape_pos(struct mddev *mddev)
4764{
4765 struct r10conf *conf = mddev->private;
Guoqing Jiang5ebaf802018-10-18 16:37:43 +08004766 sector_t lo, hi;
Guoqing Jiang7564bed2018-10-18 16:37:42 +08004767
Guoqing Jiang5ebaf802018-10-18 16:37:43 +08004768 md_cluster_ops->resync_info_get(mddev, &lo, &hi);
4769 if (((mddev->reshape_position <= hi) && (mddev->reshape_position >= lo))
4770 || mddev->reshape_position == MaxSector)
4771 conf->reshape_progress = mddev->reshape_position;
4772 else
4773 WARN_ON_ONCE(1);
Guoqing Jiang7564bed2018-10-18 16:37:42 +08004774}
4775
NeilBrown3ea7daa2012-05-22 13:53:47 +10004776static int handle_reshape_read_error(struct mddev *mddev,
4777 struct r10bio *r10_bio)
4778{
4779 /* Use sync reads to get the blocks from somewhere else */
4780 int sectors = r10_bio->sectors;
NeilBrown3ea7daa2012-05-22 13:53:47 +10004781 struct r10conf *conf = mddev->private;
Matthias Kaehlcke584ed9fa2017-10-05 11:28:47 -07004782 struct r10bio *r10b;
NeilBrown3ea7daa2012-05-22 13:53:47 +10004783 int slot = 0;
4784 int idx = 0;
Ming Lei2d06e3b2017-03-17 00:12:35 +08004785 struct page **pages;
4786
Matthias Kaehlcke584ed9fa2017-10-05 11:28:47 -07004787 r10b = kmalloc(sizeof(*r10b) +
4788 sizeof(struct r10dev) * conf->copies, GFP_NOIO);
4789 if (!r10b) {
4790 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
4791 return -ENOMEM;
4792 }
4793
Ming Lei2d06e3b2017-03-17 00:12:35 +08004794 /* reshape IOs share pages from .devs[0].bio */
4795 pages = get_resync_pages(r10_bio->devs[0].bio)->pages;
NeilBrown3ea7daa2012-05-22 13:53:47 +10004796
NeilBrowne0ee7782012-08-18 09:51:42 +10004797 r10b->sector = r10_bio->sector;
4798 __raid10_find_phys(&conf->prev, r10b);
NeilBrown3ea7daa2012-05-22 13:53:47 +10004799
4800 while (sectors) {
4801 int s = sectors;
4802 int success = 0;
4803 int first_slot = slot;
4804
4805 if (s > (PAGE_SIZE >> 9))
4806 s = PAGE_SIZE >> 9;
4807
NeilBrownd094d682016-06-02 16:19:52 +10004808 rcu_read_lock();
NeilBrown3ea7daa2012-05-22 13:53:47 +10004809 while (!success) {
NeilBrowne0ee7782012-08-18 09:51:42 +10004810 int d = r10b->devs[slot].devnum;
NeilBrownd094d682016-06-02 16:19:52 +10004811 struct md_rdev *rdev = rcu_dereference(conf->mirrors[d].rdev);
NeilBrown3ea7daa2012-05-22 13:53:47 +10004812 sector_t addr;
4813 if (rdev == NULL ||
4814 test_bit(Faulty, &rdev->flags) ||
4815 !test_bit(In_sync, &rdev->flags))
4816 goto failed;
4817
NeilBrowne0ee7782012-08-18 09:51:42 +10004818 addr = r10b->devs[slot].addr + idx * PAGE_SIZE;
NeilBrownd094d682016-06-02 16:19:52 +10004819 atomic_inc(&rdev->nr_pending);
4820 rcu_read_unlock();
NeilBrown3ea7daa2012-05-22 13:53:47 +10004821 success = sync_page_io(rdev,
4822 addr,
4823 s << 9,
Ming Lei2d06e3b2017-03-17 00:12:35 +08004824 pages[idx],
Mike Christie796a5cf2016-06-05 14:32:07 -05004825 REQ_OP_READ, 0, false);
NeilBrownd094d682016-06-02 16:19:52 +10004826 rdev_dec_pending(rdev, mddev);
4827 rcu_read_lock();
NeilBrown3ea7daa2012-05-22 13:53:47 +10004828 if (success)
4829 break;
4830 failed:
4831 slot++;
4832 if (slot >= conf->copies)
4833 slot = 0;
4834 if (slot == first_slot)
4835 break;
4836 }
NeilBrownd094d682016-06-02 16:19:52 +10004837 rcu_read_unlock();
NeilBrown3ea7daa2012-05-22 13:53:47 +10004838 if (!success) {
4839 /* couldn't read this block, must give up */
4840 set_bit(MD_RECOVERY_INTR,
4841 &mddev->recovery);
Matthias Kaehlcke584ed9fa2017-10-05 11:28:47 -07004842 kfree(r10b);
NeilBrown3ea7daa2012-05-22 13:53:47 +10004843 return -EIO;
4844 }
4845 sectors -= s;
4846 idx++;
4847 }
Matthias Kaehlcke584ed9fa2017-10-05 11:28:47 -07004848 kfree(r10b);
NeilBrown3ea7daa2012-05-22 13:53:47 +10004849 return 0;
4850}
4851
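/*
 * Completion handler for the writes issued by a reshape.  A failed write is
 * reported with md_error(); in either case the rdev's pending count is
 * dropped and the write is accounted against the r10_bio.
 */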
Christoph Hellwig4246a0b2015-07-20 15:29:37 +02004852static void end_reshape_write(struct bio *bio)
NeilBrown3ea7daa2012-05-22 13:53:47 +10004853{
Ming Leif0250612017-03-17 00:12:33 +08004854 struct r10bio *r10_bio = get_resync_r10bio(bio);
NeilBrown3ea7daa2012-05-22 13:53:47 +10004855 struct mddev *mddev = r10_bio->mddev;
4856 struct r10conf *conf = mddev->private;
4857 int d;
4858 int slot;
4859 int repl;
4860 struct md_rdev *rdev = NULL;
4861
4862 d = find_bio_disk(conf, r10_bio, bio, &slot, &repl);
4863 if (repl)
4864 rdev = conf->mirrors[d].replacement;
4865 if (!rdev) {
4866 smp_mb();
4867 rdev = conf->mirrors[d].rdev;
4868 }
4869
Christoph Hellwig4e4cbee2017-06-03 09:38:06 +02004870 if (bio->bi_status) {
NeilBrown3ea7daa2012-05-22 13:53:47 +10004871		/* FIXME: should record a bad block rather than just calling md_error() */
4872 md_error(mddev, rdev);
4873 }
4874
4875 rdev_dec_pending(rdev, mddev);
4876 end_reshape_request(r10_bio);
4877}
4878
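/*
 * Called for each completed reshape write; once the last one finishes,
 * report the sectors as done to md and release the r10_bio and its buffers.
 */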
4879static void end_reshape_request(struct r10bio *r10_bio)
4880{
4881 if (!atomic_dec_and_test(&r10_bio->remaining))
4882 return;
4883 md_done_sync(r10_bio->mddev, r10_bio->sectors, 1);
4884 bio_put(r10_bio->master_bio);
4885 put_buf(r10_bio);
4886}
4887
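/*
 * The reshape has completed (nothing to do here if it was interrupted).
 * If the array grew, arrange for the newly added space to be resynced; if
 * it shrank, mark the devices that are no longer used as out of sync.
 * Then make the new layout and chunk size current and clear the reshape
 * state.
 */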
4888static void raid10_finish_reshape(struct mddev *mddev)
4889{
4890 struct r10conf *conf = mddev->private;
4891
4892 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
4893 return;
4894
4895 if (mddev->delta_disks > 0) {
NeilBrown3ea7daa2012-05-22 13:53:47 +10004896 if (mddev->recovery_cp > mddev->resync_max_sectors) {
4897 mddev->recovery_cp = mddev->resync_max_sectors;
4898 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
4899 }
BingJing Chang88763912018-02-22 13:34:46 +08004900 mddev->resync_max_sectors = mddev->array_sectors;
NeilBrown63aced62012-05-22 13:55:33 +10004901 } else {
4902 int d;
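		/*
		 * delta_disks is zero or negative here; when the array
		 * shrank, this loop walks the devices between the new
		 * (smaller) and the old raid_disks count and takes them
		 * out of the in-sync set.
		 */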
NeilBrownd094d682016-06-02 16:19:52 +10004903 rcu_read_lock();
NeilBrown63aced62012-05-22 13:55:33 +10004904		for (d = conf->geo.raid_disks;
4905 d < conf->geo.raid_disks - mddev->delta_disks;
4906 d++) {
NeilBrownd094d682016-06-02 16:19:52 +10004907 struct md_rdev *rdev = rcu_dereference(conf->mirrors[d].rdev);
NeilBrown63aced62012-05-22 13:55:33 +10004908 if (rdev)
4909 clear_bit(In_sync, &rdev->flags);
NeilBrownd094d682016-06-02 16:19:52 +10004910 rdev = rcu_dereference(conf->mirrors[d].replacement);
NeilBrown63aced62012-05-22 13:55:33 +10004911 if (rdev)
4912 clear_bit(In_sync, &rdev->flags);
4913 }
NeilBrownd094d682016-06-02 16:19:52 +10004914 rcu_read_unlock();
NeilBrown3ea7daa2012-05-22 13:53:47 +10004915 }
4916 mddev->layout = mddev->new_layout;
4917 mddev->chunk_sectors = 1 << conf->geo.chunk_shift;
4918 mddev->reshape_position = MaxSector;
4919 mddev->delta_disks = 0;
4920 mddev->reshape_backwards = 0;
4921}
4922
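/*
 * The operations table the md core uses to drive RAID10 arrays; it is
 * registered from raid_init() below.
 */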
NeilBrown84fc4b52011-10-11 16:49:58 +11004923static struct md_personality raid10_personality =
Linus Torvalds1da177e2005-04-16 15:20:36 -07004924{
4925 .name = "raid10",
NeilBrown2604b702006-01-06 00:20:36 -08004926 .level = 10,
Linus Torvalds1da177e2005-04-16 15:20:36 -07004927 .owner = THIS_MODULE,
Shaohua Li849674e2016-01-20 13:52:20 -08004928 .make_request = raid10_make_request,
4929 .run = raid10_run,
NeilBrownafa0f552014-12-15 12:56:58 +11004930 .free = raid10_free,
Shaohua Li849674e2016-01-20 13:52:20 -08004931 .status = raid10_status,
4932 .error_handler = raid10_error,
Linus Torvalds1da177e2005-04-16 15:20:36 -07004933 .hot_add_disk = raid10_add_disk,
4934 .hot_remove_disk= raid10_remove_disk,
4935 .spare_active = raid10_spare_active,
Shaohua Li849674e2016-01-20 13:52:20 -08004936 .sync_request = raid10_sync_request,
NeilBrown6cce3b22006-01-06 00:20:16 -08004937 .quiesce = raid10_quiesce,
Dan Williams80c3a6c2009-03-17 18:10:40 -07004938 .size = raid10_size,
NeilBrown006a09a2012-03-19 12:46:40 +11004939 .resize = raid10_resize,
Trela, Maciejdab8b292010-03-08 16:02:45 +11004940 .takeover = raid10_takeover,
NeilBrown3ea7daa2012-05-22 13:53:47 +10004941 .check_reshape = raid10_check_reshape,
4942 .start_reshape = raid10_start_reshape,
4943 .finish_reshape = raid10_finish_reshape,
Guoqing Jiang7564bed2018-10-18 16:37:42 +08004944 .update_reshape_pos = raid10_update_reshape_pos,
NeilBrown5c675f82014-12-15 12:56:56 +11004945 .congested = raid10_congested,
Linus Torvalds1da177e2005-04-16 15:20:36 -07004946};
4947
4948static int __init raid_init(void)
4949{
NeilBrown2604b702006-01-06 00:20:36 -08004950 return register_md_personality(&raid10_personality);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004951}
4952
4953static void raid_exit(void)
4954{
NeilBrown2604b702006-01-06 00:20:36 -08004955 unregister_md_personality(&raid10_personality);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004956}
4957
4958module_init(raid_init);
4959module_exit(raid_exit);
4960MODULE_LICENSE("GPL");
NeilBrown0efb9e62009-12-14 12:49:58 +11004961MODULE_DESCRIPTION("RAID10 (striped mirror) personality for MD");
Linus Torvalds1da177e2005-04-16 15:20:36 -07004962MODULE_ALIAS("md-personality-9"); /* RAID10 */
NeilBrownd9d166c2006-01-06 00:20:51 -08004963MODULE_ALIAS("md-raid10");
NeilBrown2604b702006-01-06 00:20:36 -08004964MODULE_ALIAS("md-level-10");
NeilBrown34db0cd2011-10-11 16:50:01 +11004965
4966module_param(max_queued_requests, int, S_IRUGO|S_IWUSR);