/*
   raid0.c : Multiple Devices driver for Linux
	     Copyright (C) 1994-96 Marc ZYNGIER
	     <zyngier@ufr-info-p7.ibp.fr> or
	     <maz@gloups.fdn.fr>
	     Copyright (C) 1999, 2000 Ingo Molnar, Red Hat

   RAID-0 management functions.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   You should have received a copy of the GNU General Public License
   (for example /usr/src/linux/COPYING); if not, write to the Free
   Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/

#include <linux/blkdev.h>
#include <linux/seq_file.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <trace/events/block.h>
#include "md.h"
#include "raid0.h"
#include "raid5.h"

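/*
 * Array-state flags that have no meaning for raid0 (it keeps no journal
 * and does not support failfast); the takeover helpers below clear them
 * when another personality is converted to raid0.
 */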
#define UNSUPPORTED_MDDEV_FLAGS		\
	((1L << MD_HAS_JOURNAL) |	\
	 (1L << MD_JOURNAL_CLEAN) |	\
	 (1L << MD_FAILFAST_SUPPORTED))

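/*
 * Congestion callback: the array is reported congested as soon as the
 * backing device of any member disk reports congestion for the queried
 * bits.
 */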
static int raid0_congested(struct mddev *mddev, int bits)
{
	struct r0conf *conf = mddev->private;
	struct md_rdev **devlist = conf->devlist;
	int raid_disks = conf->strip_zone[0].nb_dev;
	int i, ret = 0;

	for (i = 0; i < raid_disks && !ret ; i++) {
		struct request_queue *q = bdev_get_queue(devlist[i]->bdev);

		ret |= bdi_congested(&q->backing_dev_info, bits);
	}
	return ret;
}

/*
 * inform the user of the raid configuration
*/
static void dump_zones(struct mddev *mddev)
{
	int j, k;
	sector_t zone_size = 0;
	sector_t zone_start = 0;
	char b[BDEVNAME_SIZE];
	struct r0conf *conf = mddev->private;
	int raid_disks = conf->strip_zone[0].nb_dev;
	pr_debug("md: RAID0 configuration for %s - %d zone%s\n",
		 mdname(mddev),
		 conf->nr_strip_zones, conf->nr_strip_zones==1?"":"s");
	for (j = 0; j < conf->nr_strip_zones; j++) {
		char line[200];
		int len = 0;

		for (k = 0; k < conf->strip_zone[j].nb_dev; k++)
			len += snprintf(line+len, 200-len, "%s%s", k?"/":"",
					bdevname(conf->devlist[j*raid_disks
							       + k]->bdev, b));
		pr_debug("md: zone%d=[%s]\n", j, line);

		zone_size = conf->strip_zone[j].zone_end - zone_start;
		pr_debug(" zone-offset=%10lluKB, device-offset=%10lluKB, size=%10lluKB\n",
			(unsigned long long)zone_start>>1,
			(unsigned long long)conf->strip_zone[j].dev_start>>1,
			(unsigned long long)zone_size>>1);
		zone_start = conf->strip_zone[j].zone_end;
	}
}

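/*
 * Build the strip-zone table.  Members of different sizes are handled by
 * carving the array into zones: zone 0 stripes across all disks up to the
 * size of the smallest one, the next zone stripes across the disks that
 * still have space left, and so on.  For example (sizes illustrative),
 * two 100GiB disks plus one 200GiB disk give zone 0 = 300GiB striped over
 * all three disks and zone 1 = the remaining 100GiB of the large disk
 * on its own.
 */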
static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
{
	int i, c, err;
	sector_t curr_zone_end, sectors;
	struct md_rdev *smallest, *rdev1, *rdev2, *rdev, **dev;
	struct strip_zone *zone;
	int cnt;
	char b[BDEVNAME_SIZE];
	char b2[BDEVNAME_SIZE];
	struct r0conf *conf = kzalloc(sizeof(*conf), GFP_KERNEL);
	unsigned short blksize = 512;

	*private_conf = ERR_PTR(-ENOMEM);
	if (!conf)
		return -ENOMEM;
	rdev_for_each(rdev1, mddev) {
		pr_debug("md/raid0:%s: looking at %s\n",
			 mdname(mddev),
			 bdevname(rdev1->bdev, b));
		c = 0;

		/* round size to chunk_size */
		sectors = rdev1->sectors;
		sector_div(sectors, mddev->chunk_sectors);
		rdev1->sectors = sectors * mddev->chunk_sectors;

		blksize = max(blksize, queue_logical_block_size(
				      rdev1->bdev->bd_disk->queue));

		rdev_for_each(rdev2, mddev) {
			pr_debug("md/raid0:%s: comparing %s(%llu)"
				 " with %s(%llu)\n",
				 mdname(mddev),
				 bdevname(rdev1->bdev,b),
				 (unsigned long long)rdev1->sectors,
				 bdevname(rdev2->bdev,b2),
				 (unsigned long long)rdev2->sectors);
			if (rdev2 == rdev1) {
				pr_debug("md/raid0:%s: END\n",
					 mdname(mddev));
				break;
			}
			if (rdev2->sectors == rdev1->sectors) {
				/*
				 * Not unique, don't count it as a new
				 * group
				 */
				pr_debug("md/raid0:%s: EQUAL\n",
					 mdname(mddev));
				c = 1;
				break;
			}
			pr_debug("md/raid0:%s: NOT EQUAL\n",
				 mdname(mddev));
		}
		if (!c) {
			pr_debug("md/raid0:%s: ==> UNIQUE\n",
				 mdname(mddev));
			conf->nr_strip_zones++;
			pr_debug("md/raid0:%s: %d zones\n",
				 mdname(mddev), conf->nr_strip_zones);
		}
	}
	pr_debug("md/raid0:%s: FINAL %d zones\n",
		 mdname(mddev), conf->nr_strip_zones);
	/*
	 * now since we have the hard sector sizes, we can make sure
	 * chunk size is a multiple of that sector size
	 */
	if ((mddev->chunk_sectors << 9) % blksize) {
		pr_warn("md/raid0:%s: chunk_size of %d not multiple of block size %d\n",
			mdname(mddev),
			mddev->chunk_sectors << 9, blksize);
		err = -EINVAL;
		goto abort;
	}

	err = -ENOMEM;
	conf->strip_zone = kzalloc(sizeof(struct strip_zone)*
				conf->nr_strip_zones, GFP_KERNEL);
	if (!conf->strip_zone)
		goto abort;
	conf->devlist = kzalloc(sizeof(struct md_rdev*)*
				conf->nr_strip_zones*mddev->raid_disks,
				GFP_KERNEL);
	if (!conf->devlist)
		goto abort;

	/* The first zone must contain all devices, so here we check that
	 * there is a proper alignment of slots to devices and find them all
	 */
	zone = &conf->strip_zone[0];
	cnt = 0;
	smallest = NULL;
	dev = conf->devlist;
	err = -EINVAL;
	rdev_for_each(rdev1, mddev) {
		int j = rdev1->raid_disk;

		if (mddev->level == 10) {
			/* taking over a raid10-n2 array */
			j /= 2;
			rdev1->new_raid_disk = j;
		}

		if (mddev->level == 1) {
			/* taking over a raid1 array -
			 * we have only one active disk
			 */
			j = 0;
			rdev1->new_raid_disk = j;
		}

		if (j < 0) {
			pr_warn("md/raid0:%s: remove inactive devices before converting to RAID0\n",
				mdname(mddev));
			goto abort;
		}
		if (j >= mddev->raid_disks) {
			pr_warn("md/raid0:%s: bad disk number %d - aborting!\n",
				mdname(mddev), j);
			goto abort;
		}
		if (dev[j]) {
			pr_warn("md/raid0:%s: multiple devices for %d - aborting!\n",
				mdname(mddev), j);
			goto abort;
		}
		dev[j] = rdev1;

		if (!smallest || (rdev1->sectors < smallest->sectors))
			smallest = rdev1;
		cnt++;
	}
	if (cnt != mddev->raid_disks) {
		pr_warn("md/raid0:%s: too few disks (%d of %d) - aborting!\n",
			mdname(mddev), cnt, mddev->raid_disks);
		goto abort;
	}
	zone->nb_dev = cnt;
	zone->zone_end = smallest->sectors * cnt;

	curr_zone_end = zone->zone_end;

	/* now do the other zones */
	for (i = 1; i < conf->nr_strip_zones; i++)
	{
		int j;

		zone = conf->strip_zone + i;
		dev = conf->devlist + i * mddev->raid_disks;

		pr_debug("md/raid0:%s: zone %d\n", mdname(mddev), i);
		zone->dev_start = smallest->sectors;
		smallest = NULL;
		c = 0;

		for (j=0; j<cnt; j++) {
			rdev = conf->devlist[j];
			if (rdev->sectors <= zone->dev_start) {
				pr_debug("md/raid0:%s: checking %s ... nope\n",
					 mdname(mddev),
					 bdevname(rdev->bdev, b));
				continue;
			}
			pr_debug("md/raid0:%s: checking %s ..."
				 " contained as device %d\n",
				 mdname(mddev),
				 bdevname(rdev->bdev, b), c);
			dev[c] = rdev;
			c++;
			if (!smallest || rdev->sectors < smallest->sectors) {
				smallest = rdev;
				pr_debug("md/raid0:%s: (%llu) is smallest!\n",
					 mdname(mddev),
					 (unsigned long long)rdev->sectors);
			}
		}

		zone->nb_dev = c;
		sectors = (smallest->sectors - zone->dev_start) * c;
		pr_debug("md/raid0:%s: zone->nb_dev: %d, sectors: %llu\n",
			 mdname(mddev),
			 zone->nb_dev, (unsigned long long)sectors);

		curr_zone_end += sectors;
		zone->zone_end = curr_zone_end;

		pr_debug("md/raid0:%s: current zone start: %llu\n",
			 mdname(mddev),
			 (unsigned long long)smallest->sectors);
	}

	pr_debug("md/raid0:%s: done.\n", mdname(mddev));
	*private_conf = conf;

	return 0;
abort:
	kfree(conf->strip_zone);
	kfree(conf->devlist);
	kfree(conf);
	*private_conf = ERR_PTR(err);
	return err;
}

/* Find the zone which holds a particular offset
 * Update *sectorp to be an offset in that zone
 */
static struct strip_zone *find_zone(struct r0conf *conf,
				    sector_t *sectorp)
{
	int i;
	struct strip_zone *z = conf->strip_zone;
	sector_t sector = *sectorp;

	for (i = 0; i < conf->nr_strip_zones; i++)
		if (sector < z[i].zone_end) {
			if (i)
				*sectorp = sector - z[i-1].zone_end;
			return z + i;
		}
	BUG();
}

/*
 * remaps the bio to the target device. Two flows are kept separate for
 * the sake of performance: a power-of-2 chunk-size flow and a general flow.
*/
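/*
 * Illustrative example (values assumed): with 64KiB chunks
 * (chunk_sects = 128) and a two-disk zone, a zone-relative sector of
 * 1000 lies in chunk 7 of the zone (1000 >> 7), which round-robins to
 * member 1; it is that member's chunk 3 (7 / 2), so the returned offset
 * is 3 * 128 + (1000 & 127) = 488 sectors into the zone's data area.
 */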
static struct md_rdev *map_sector(struct mddev *mddev, struct strip_zone *zone,
				sector_t sector, sector_t *sector_offset)
{
	unsigned int sect_in_chunk;
	sector_t chunk;
	struct r0conf *conf = mddev->private;
	int raid_disks = conf->strip_zone[0].nb_dev;
	unsigned int chunk_sects = mddev->chunk_sectors;

	if (is_power_of_2(chunk_sects)) {
		int chunksect_bits = ffz(~chunk_sects);
		/* find the sector offset inside the chunk */
		sect_in_chunk = sector & (chunk_sects - 1);
		sector >>= chunksect_bits;
		/* chunk in zone */
		chunk = *sector_offset;
		/* quotient is the chunk in real device*/
		sector_div(chunk, zone->nb_dev << chunksect_bits);
	} else{
		sect_in_chunk = sector_div(sector, chunk_sects);
		chunk = *sector_offset;
		sector_div(chunk, chunk_sects * zone->nb_dev);
	}
	/*
	 * position the bio over the real device
	 * real sector = chunk in device + starting of zone
	 *	+ the position in the chunk
	 */
	*sector_offset = (chunk * chunk_sects) + sect_in_chunk;
	return conf->devlist[(zone - conf->strip_zone)*raid_disks
			     + sector_div(sector, zone->nb_dev)];
}

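/*
 * The exported array size is the sum of every member's size, each rounded
 * down to a whole number of chunks; raid0 has no generic reshape support.
 */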
static sector_t raid0_size(struct mddev *mddev, sector_t sectors, int raid_disks)
{
	sector_t array_sectors = 0;
	struct md_rdev *rdev;

	WARN_ONCE(sectors || raid_disks,
		  "%s does not support generic reshape\n", __func__);

	rdev_for_each(rdev, mddev)
		array_sectors += (rdev->sectors &
				  ~(sector_t)(mddev->chunk_sectors-1));

	return array_sectors;
}

static void raid0_free(struct mddev *mddev, void *priv);

static int raid0_run(struct mddev *mddev)
{
	struct r0conf *conf;
	int ret;

	if (mddev->chunk_sectors == 0) {
		pr_warn("md/raid0:%s: chunk size must be set.\n", mdname(mddev));
		return -EINVAL;
	}
	if (md_check_no_bitmap(mddev))
		return -EINVAL;

	/* if private is not null, we are here after takeover */
	if (mddev->private == NULL) {
		ret = create_strip_zones(mddev, &conf);
		if (ret < 0)
			return ret;
		mddev->private = conf;
	}
	conf = mddev->private;
	if (mddev->queue) {
		struct md_rdev *rdev;
		bool discard_supported = false;

		blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);
		blk_queue_max_write_same_sectors(mddev->queue, mddev->chunk_sectors);
		blk_queue_max_discard_sectors(mddev->queue, mddev->chunk_sectors);

		blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9);
		blk_queue_io_opt(mddev->queue,
				 (mddev->chunk_sectors << 9) * mddev->raid_disks);

		rdev_for_each(rdev, mddev) {
			disk_stack_limits(mddev->gendisk, rdev->bdev,
					  rdev->data_offset << 9);
			if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
				discard_supported = true;
		}
		if (!discard_supported)
			queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
		else
			queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
	}

	/* calculate array device size */
	md_set_array_sectors(mddev, raid0_size(mddev, 0, 0));

	pr_debug("md/raid0:%s: md_size is %llu sectors.\n",
		 mdname(mddev),
		 (unsigned long long)mddev->array_sectors);

	if (mddev->queue) {
		/* calculate the max read-ahead size.
		 * For read-ahead of large files to be effective, we need to
		 * readahead at least twice a whole stripe. i.e. number of devices
		 * multiplied by chunk size times 2.
		 * If an individual device has an ra_pages greater than the
		 * chunk size, then we will not drive that device as hard as it
		 * wants. We consider this a configuration error: a larger
		 * chunksize should be used in that case.
		 */
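		/* Illustrative example (values assumed): 4 member disks with
		 * 256KiB chunks and 4KiB pages give a stripe of 256 pages, so
		 * ra_pages is raised to at least 512 pages (2MiB). */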
		int stripe = mddev->raid_disks *
			(mddev->chunk_sectors << 9) / PAGE_SIZE;
		if (mddev->queue->backing_dev_info.ra_pages < 2* stripe)
			mddev->queue->backing_dev_info.ra_pages = 2* stripe;
	}

	dump_zones(mddev);

	ret = md_integrity_register(mddev);

	return ret;
}

static void raid0_free(struct mddev *mddev, void *priv)
{
	struct r0conf *conf = priv;

	kfree(conf->strip_zone);
	kfree(conf->devlist);
	kfree(conf);
}

/*
 * Is the IO distributed over 1 or more chunks?
*/
static inline int is_io_in_chunk_boundary(struct mddev *mddev,
			unsigned int chunk_sects, struct bio *bio)
{
	if (likely(is_power_of_2(chunk_sects))) {
		return chunk_sects >=
			((bio->bi_iter.bi_sector & (chunk_sects-1))
					+ bio_sectors(bio));
	} else{
		sector_t sector = bio->bi_iter.bi_sector;
		return chunk_sects >= (sector_div(sector, chunk_sects)
				+ bio_sectors(bio));
	}
}

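/*
 * Main request entry point: split the bio at chunk boundaries so that each
 * fragment lies entirely within one chunk, then remap every fragment to its
 * member device and resubmit it.  Flush requests are handed to md_flush_request.
 */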
static void raid0_make_request(struct mddev *mddev, struct bio *bio)
{
	struct strip_zone *zone;
	struct md_rdev *tmp_dev;
	struct bio *split;

	if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
		md_flush_request(mddev, bio);
		return;
	}

	do {
		sector_t bio_sector = bio->bi_iter.bi_sector;
		sector_t sector = bio_sector;
		unsigned chunk_sects = mddev->chunk_sectors;

		unsigned sectors = chunk_sects -
			(likely(is_power_of_2(chunk_sects))
			 ? (sector & (chunk_sects-1))
			 : sector_div(sector, chunk_sects));

		/* Restore due to sector_div */
		sector = bio_sector;

		if (sectors < bio_sectors(bio)) {
			split = bio_split(bio, sectors, GFP_NOIO, fs_bio_set);
			bio_chain(split, bio);
		} else {
			split = bio;
		}

		zone = find_zone(mddev->private, &sector);
		tmp_dev = map_sector(mddev, zone, sector, &sector);
		split->bi_bdev = tmp_dev->bdev;
		split->bi_iter.bi_sector = sector + zone->dev_start +
			tmp_dev->data_offset;

		if (unlikely((bio_op(split) == REQ_OP_DISCARD) &&
			 !blk_queue_discard(bdev_get_queue(split->bi_bdev)))) {
			/* Just ignore it */
			bio_endio(split);
		} else {
			if (mddev->gendisk)
				trace_block_bio_remap(bdev_get_queue(split->bi_bdev),
						      split, disk_devt(mddev->gendisk),
						      bio_sector);
			generic_make_request(split);
		}
	} while (split != bio);
}

static void raid0_status(struct seq_file *seq, struct mddev *mddev)
{
	seq_printf(seq, " %dk chunks", mddev->chunk_sectors / 2);
	return;
}

static void *raid0_takeover_raid45(struct mddev *mddev)
{
	struct md_rdev *rdev;
	struct r0conf *priv_conf;

	if (mddev->degraded != 1) {
		pr_warn("md/raid0:%s: raid5 must be degraded! Degraded disks: %d\n",
			mdname(mddev),
			mddev->degraded);
		return ERR_PTR(-EINVAL);
	}

	rdev_for_each(rdev, mddev) {
		/* check slot number for a disk */
		if (rdev->raid_disk == mddev->raid_disks-1) {
			pr_warn("md/raid0:%s: raid5 must have missing parity disk!\n",
				mdname(mddev));
			return ERR_PTR(-EINVAL);
		}
		rdev->sectors = mddev->dev_sectors;
	}

	/* Set new parameters */
	mddev->new_level = 0;
	mddev->new_layout = 0;
	mddev->new_chunk_sectors = mddev->chunk_sectors;
	mddev->raid_disks--;
	mddev->delta_disks = -1;
	/* make sure it will not be marked as dirty */
	mddev->recovery_cp = MaxSector;
	mddev_clear_unsupported_flags(mddev, UNSUPPORTED_MDDEV_FLAGS);

	create_strip_zones(mddev, &priv_conf);

	return priv_conf;
}

static void *raid0_takeover_raid10(struct mddev *mddev)
{
	struct r0conf *priv_conf;

	/* Check layout:
	 *  - far_copies must be 1
	 *  - near_copies must be 2
	 *  - disks number must be even
	 *  - all mirrors must be already degraded
	 */
	if (mddev->layout != ((1 << 8) + 2)) {
		pr_warn("md/raid0:%s: Raid0 cannot takeover layout: 0x%x\n",
			mdname(mddev),
			mddev->layout);
		return ERR_PTR(-EINVAL);
	}
	if (mddev->raid_disks & 1) {
		pr_warn("md/raid0:%s: Raid0 cannot takeover Raid10 with odd disk number.\n",
			mdname(mddev));
		return ERR_PTR(-EINVAL);
	}
	if (mddev->degraded != (mddev->raid_disks>>1)) {
		pr_warn("md/raid0:%s: All mirrors must be already degraded!\n",
			mdname(mddev));
		return ERR_PTR(-EINVAL);
	}

	/* Set new parameters */
	mddev->new_level = 0;
	mddev->new_layout = 0;
	mddev->new_chunk_sectors = mddev->chunk_sectors;
	mddev->delta_disks = - mddev->raid_disks / 2;
	mddev->raid_disks += mddev->delta_disks;
	mddev->degraded = 0;
	/* make sure it will not be marked as dirty */
	mddev->recovery_cp = MaxSector;
	mddev_clear_unsupported_flags(mddev, UNSUPPORTED_MDDEV_FLAGS);

	create_strip_zones(mddev, &priv_conf);
	return priv_conf;
}

static void *raid0_takeover_raid1(struct mddev *mddev)
{
	struct r0conf *priv_conf;
	int chunksect;

	/* Check layout:
	 *  - (N - 1) mirror drives must be already faulty
	 */
	if ((mddev->raid_disks - 1) != mddev->degraded) {
		pr_err("md/raid0:%s: (N - 1) mirror drives must be already faulty!\n",
		       mdname(mddev));
		return ERR_PTR(-EINVAL);
	}

	/*
	 * a raid1 doesn't have the notion of chunk size, so
	 * figure out the largest suitable size we can use.
	 */
	chunksect = 64 * 2; /* 64K by default */

	/* The array must be an exact multiple of chunksize */
	while (chunksect && (mddev->array_sectors & (chunksect - 1)))
		chunksect >>= 1;
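	/* Illustrative example (array size assumed): a 1000-sector array
	 * fails the test for 128, 64, 32 and 16 but passes for 8, so an
	 * 8-sector (4KiB) chunk is chosen. */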

	if ((chunksect << 9) < PAGE_SIZE)
		/* array size does not allow a suitable chunk size */
		return ERR_PTR(-EINVAL);

	/* Set new parameters */
	mddev->new_level = 0;
	mddev->new_layout = 0;
	mddev->new_chunk_sectors = chunksect;
	mddev->chunk_sectors = chunksect;
	mddev->delta_disks = 1 - mddev->raid_disks;
	mddev->raid_disks = 1;
	/* make sure it will not be marked as dirty */
	mddev->recovery_cp = MaxSector;
	mddev_clear_unsupported_flags(mddev, UNSUPPORTED_MDDEV_FLAGS);

	create_strip_zones(mddev, &priv_conf);
	return priv_conf;
}

static void *raid0_takeover(struct mddev *mddev)
{
	/* raid0 can take over:
	 *  raid4 - if all data disks are active.
	 *  raid5 - providing it is Raid4 layout and one disk is faulty
	 *  raid10 - assuming we have all necessary active disks
	 *  raid1 - with (N - 1) mirror drives faulty
	 */

	if (mddev->bitmap) {
		pr_warn("md/raid0: %s: cannot takeover array with bitmap\n",
			mdname(mddev));
		return ERR_PTR(-EBUSY);
	}
	if (mddev->level == 4)
		return raid0_takeover_raid45(mddev);

	if (mddev->level == 5) {
		if (mddev->layout == ALGORITHM_PARITY_N)
			return raid0_takeover_raid45(mddev);

		pr_warn("md/raid0:%s: Raid can only takeover Raid5 with layout: %d\n",
			mdname(mddev), ALGORITHM_PARITY_N);
	}

	if (mddev->level == 10)
		return raid0_takeover_raid10(mddev);

	if (mddev->level == 1)
		return raid0_takeover_raid1(mddev);

	pr_warn("Takeover from raid%i to raid0 not supported\n",
		mddev->level);

	return ERR_PTR(-EINVAL);
}

static void raid0_quiesce(struct mddev *mddev, int state)
{
}

static struct md_personality raid0_personality=
{
	.name		= "raid0",
	.level		= 0,
	.owner		= THIS_MODULE,
	.make_request	= raid0_make_request,
	.run		= raid0_run,
	.free		= raid0_free,
	.status		= raid0_status,
	.size		= raid0_size,
	.takeover	= raid0_takeover,
	.quiesce	= raid0_quiesce,
	.congested	= raid0_congested,
};

static int __init raid0_init (void)
{
	return register_md_personality (&raid0_personality);
}

static void raid0_exit (void)
{
	unregister_md_personality (&raid0_personality);
}

module_init(raid0_init);
module_exit(raid0_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("RAID0 (striping) personality for MD");
MODULE_ALIAS("md-personality-2"); /* RAID0 */
MODULE_ALIAS("md-raid0");
MODULE_ALIAS("md-level-0");