/*
   raid0.c : Multiple Devices driver for Linux
	     Copyright (C) 1994-96 Marc ZYNGIER
	     <zyngier@ufr-info-p7.ibp.fr> or
	     <maz@gloups.fdn.fr>
	     Copyright (C) 1999, 2000 Ingo Molnar, Red Hat


   RAID-0 management functions.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   You should have received a copy of the GNU General Public License
   (for example /usr/src/linux/COPYING); if not, write to the Free
   Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/

#include <linux/raid/raid0.h>

static void raid0_unplug(struct request_queue *q)
{
	mddev_t *mddev = q->queuedata;
	raid0_conf_t *conf = mddev_to_conf(mddev);
	mdk_rdev_t **devlist = conf->strip_zone[0].dev;
	int i;

	for (i=0; i<mddev->raid_disks; i++) {
		struct request_queue *r_queue = bdev_get_queue(devlist[i]->bdev);

		blk_unplug(r_queue);
	}
}

static int raid0_congested(void *data, int bits)
{
	mddev_t *mddev = data;
	raid0_conf_t *conf = mddev_to_conf(mddev);
	mdk_rdev_t **devlist = conf->strip_zone[0].dev;
	int i, ret = 0;

	for (i = 0; i < mddev->raid_disks && !ret ; i++) {
		struct request_queue *q = bdev_get_queue(devlist[i]->bdev);

		ret |= bdi_congested(&q->backing_dev_info, bits);
	}
	return ret;
}


static int create_strip_zones (mddev_t *mddev)
{
	int i, c, j;
	sector_t current_start, curr_zone_start;
	sector_t min_spacing;
	raid0_conf_t *conf = mddev_to_conf(mddev);
	mdk_rdev_t *smallest, *rdev1, *rdev2, *rdev;
	struct list_head *tmp1, *tmp2;
	struct strip_zone *zone;
	int cnt;
	char b[BDEVNAME_SIZE];

	/*
	 * The number of 'same size groups'
	 */
	conf->nr_strip_zones = 0;

	rdev_for_each(rdev1, tmp1, mddev) {
		printk("raid0: looking at %s\n",
			bdevname(rdev1->bdev,b));
		c = 0;
		rdev_for_each(rdev2, tmp2, mddev) {
			printk("raid0: comparing %s(%llu)",
			       bdevname(rdev1->bdev,b),
			       (unsigned long long)rdev1->size);
			printk(" with %s(%llu)\n",
			       bdevname(rdev2->bdev,b),
			       (unsigned long long)rdev2->size);
			if (rdev2 == rdev1) {
				printk("raid0: END\n");
				break;
			}
			if (rdev2->size == rdev1->size)
			{
				/*
				 * Not unique, don't count it as a new
				 * group
				 */
				printk("raid0: EQUAL\n");
				c = 1;
				break;
			}
			printk("raid0: NOT EQUAL\n");
		}
		if (!c) {
			printk("raid0: ==> UNIQUE\n");
			conf->nr_strip_zones++;
			printk("raid0: %d zones\n", conf->nr_strip_zones);
		}
	}
	printk("raid0: FINAL %d zones\n", conf->nr_strip_zones);

	conf->strip_zone = kzalloc(sizeof(struct strip_zone)*
				conf->nr_strip_zones, GFP_KERNEL);
	if (!conf->strip_zone)
		return 1;
	conf->devlist = kzalloc(sizeof(mdk_rdev_t*)*
				conf->nr_strip_zones*mddev->raid_disks,
				GFP_KERNEL);
	if (!conf->devlist)
		return 1;

	/* The first zone must contain all devices, so here we check that
	 * there is a proper alignment of slots to devices and find them all
	 */
	zone = &conf->strip_zone[0];
	cnt = 0;
	smallest = NULL;
	zone->dev = conf->devlist;
	rdev_for_each(rdev1, tmp1, mddev) {
		int j = rdev1->raid_disk;

		if (j < 0 || j >= mddev->raid_disks) {
			printk("raid0: bad disk number %d - aborting!\n", j);
			goto abort;
		}
		if (zone->dev[j]) {
			printk("raid0: multiple devices for %d - aborting!\n",
				j);
			goto abort;
		}
		zone->dev[j] = rdev1;

		blk_queue_stack_limits(mddev->queue,
				       rdev1->bdev->bd_disk->queue);
		/* as we don't honour merge_bvec_fn, we must never risk
		 * violating it, so limit ->max_sector to one PAGE, as
		 * a one page request is never in violation.
		 */

		if (rdev1->bdev->bd_disk->queue->merge_bvec_fn &&
		    mddev->queue->max_sectors > (PAGE_SIZE>>9))
			blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);

		if (!smallest || (rdev1->size < smallest->size))
			smallest = rdev1;
		cnt++;
	}
	if (cnt != mddev->raid_disks) {
		printk("raid0: too few disks (%d of %d) - aborting!\n",
			cnt, mddev->raid_disks);
		goto abort;
	}
	zone->nb_dev = cnt;
	zone->size = smallest->size * cnt;
	zone->zone_start = 0;

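	/* Note on units: rdev->size and zone->size are kept in 1K blocks,
	 * while current_start, dev_start, zone_start and curr_zone_start
	 * below are kept in 512-byte sectors -- hence the "* 2" and "/ 2"
	 * conversions between the two.
	 */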
	current_start = smallest->size * 2;
	curr_zone_start = zone->size * 2;

	/* now do the other zones */
	for (i = 1; i < conf->nr_strip_zones; i++)
	{
		zone = conf->strip_zone + i;
		zone->dev = conf->strip_zone[i-1].dev + mddev->raid_disks;

		printk("raid0: zone %d\n", i);
		zone->dev_start = current_start;
		smallest = NULL;
		c = 0;

		for (j=0; j<cnt; j++) {
			char b[BDEVNAME_SIZE];
			rdev = conf->strip_zone[0].dev[j];
			printk("raid0: checking %s ...", bdevname(rdev->bdev,b));
			if (rdev->size > current_start / 2) {
				printk(" contained as device %d\n", c);
				zone->dev[c] = rdev;
				c++;
				if (!smallest || (rdev->size < smallest->size)) {
					smallest = rdev;
					printk(" (%llu) is smallest!.\n",
						(unsigned long long)rdev->size);
				}
			} else
				printk(" nope.\n");
		}

		zone->nb_dev = c;
		zone->size = (smallest->size - current_start / 2) * c;
		printk("raid0: zone->nb_dev: %d, size: %llu\n",
			zone->nb_dev, (unsigned long long)zone->size);

		zone->zone_start = curr_zone_start;
		curr_zone_start += zone->size * 2;

		current_start = smallest->size * 2;
		printk(KERN_INFO "raid0: current zone start: %llu\n",
			(unsigned long long)current_start);
	}

	/* Now find appropriate hash spacing.
	 * We want a number which causes most hash entries to cover
	 * at most two strips, but the hash table must be at most
	 * 1 PAGE. We choose the smallest strip, or contiguous collection
	 * of strips, that has big enough size. We never consider the last
	 * strip though as its size has no bearing on the efficacy of the hash
	 * table.
	 */
	conf->hash_spacing = curr_zone_start / 2;
	min_spacing = curr_zone_start / 2;
	sector_div(min_spacing, PAGE_SIZE/sizeof(struct strip_zone*));
	for (i=0; i < conf->nr_strip_zones-1; i++) {
		sector_t sz = 0;
		for (j=i; j<conf->nr_strip_zones-1 &&
			     sz < min_spacing ; j++)
			sz += conf->strip_zone[j].size;
		if (sz >= min_spacing && sz < conf->hash_spacing)
			conf->hash_spacing = sz;
	}

	mddev->queue->unplug_fn = raid0_unplug;

	mddev->queue->backing_dev_info.congested_fn = raid0_congested;
	mddev->queue->backing_dev_info.congested_data = mddev;

	printk("raid0: done.\n");
	return 0;
 abort:
	return 1;
}

/**
 *	raid0_mergeable_bvec -- tell bio layer if two requests can be merged
 *	@q: request queue
 *	@bvm: properties of new bio
 *	@biovec: the request that could be merged to it.
 *
 *	Return the number of bytes we can accept at this offset
 */
static int raid0_mergeable_bvec(struct request_queue *q,
				struct bvec_merge_data *bvm,
				struct bio_vec *biovec)
{
	mddev_t *mddev = q->queuedata;
	sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
	int max;
	unsigned int chunk_sectors = mddev->chunk_size >> 9;
	unsigned int bio_sectors = bvm->bi_size >> 9;

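	/* Allow the merge only up to the end of the current chunk: max is the
	 * number of bytes still free in this chunk once the existing
	 * bio_sectors are accounted for, so a bio never straddles a chunk
	 * boundary.
	 */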
	max = (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9;
	if (max < 0) max = 0; /* bio_add cannot handle a negative return */
	if (max <= biovec->bv_len && bio_sectors == 0)
		return biovec->bv_len;
	else
		return max;
}

static int raid0_run (mddev_t *mddev)
{
	unsigned cur=0, i=0, nb_zone;
	s64 size;
	raid0_conf_t *conf;
	mdk_rdev_t *rdev;
	struct list_head *tmp;

	if (mddev->chunk_size == 0) {
		printk(KERN_ERR "md/raid0: non-zero chunk size required.\n");
		return -EINVAL;
	}
	printk(KERN_INFO "%s: setting max_sectors to %d, segment boundary to %d\n",
	       mdname(mddev),
	       mddev->chunk_size >> 9,
	       (mddev->chunk_size>>1)-1);
	blk_queue_max_sectors(mddev->queue, mddev->chunk_size >> 9);
	blk_queue_segment_boundary(mddev->queue, (mddev->chunk_size>>1) - 1);
	mddev->queue->queue_lock = &mddev->queue->__queue_lock;

	conf = kmalloc(sizeof (raid0_conf_t), GFP_KERNEL);
	if (!conf)
		goto out;
	mddev->private = (void *)conf;

	conf->strip_zone = NULL;
	conf->devlist = NULL;
	if (create_strip_zones (mddev))
		goto out_free_conf;

	/* calculate array device size */
	mddev->array_sectors = 0;
	rdev_for_each(rdev, tmp, mddev)
		mddev->array_sectors += rdev->size * 2;

	printk("raid0 : md_size is %llu blocks.\n",
		(unsigned long long)mddev->array_sectors / 2);
	printk("raid0 : conf->hash_spacing is %llu blocks.\n",
		(unsigned long long)conf->hash_spacing);
	{
		sector_t s = mddev->array_sectors / 2;
		sector_t space = conf->hash_spacing;
		int round;
		conf->preshift = 0;
		if (sizeof(sector_t) > sizeof(u32)) {
			/*shift down space and s so that sector_div will work */
			while (space > (sector_t) (~(u32)0)) {
				s >>= 1;
				space >>= 1;
				s += 1; /* force round-up */
				conf->preshift++;
			}
		}
		round = sector_div(s, (u32)space) ? 1 : 0;
		nb_zone = s + round;
	}
	printk("raid0 : nb_zone is %d.\n", nb_zone);

	printk("raid0 : Allocating %Zd bytes for hash.\n",
				nb_zone*sizeof(struct strip_zone*));
	conf->hash_table = kmalloc (sizeof (struct strip_zone *)*nb_zone, GFP_KERNEL);
	if (!conf->hash_table)
		goto out_free_conf;
	size = conf->strip_zone[cur].size;

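	/* Fill the lookup table: hash_table[i] is set to the strip_zone that
	 * contains array offset i * hash_spacing (in 1K blocks), so
	 * raid0_make_request can find the right zone with one division
	 * followed by a short forward scan.
	 */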
	conf->hash_table[0] = conf->strip_zone + cur;
	for (i=1; i< nb_zone; i++) {
		while (size <= conf->hash_spacing) {
			cur++;
			size += conf->strip_zone[cur].size;
		}
		size -= conf->hash_spacing;
		conf->hash_table[i] = conf->strip_zone + cur;
	}
	if (conf->preshift) {
		conf->hash_spacing >>= conf->preshift;
		/* round hash_spacing up so when we divide by it, we
		 * err on the side of too-low, which is safest
		 */
		conf->hash_spacing++;
	}

	/* calculate the max read-ahead size.
	 * For read-ahead of large files to be effective, we need to
	 * readahead at least twice a whole stripe. i.e. number of devices
	 * multiplied by chunk size times 2.
	 * If an individual device has an ra_pages greater than the
	 * chunk size, then we will not drive that device as hard as it
	 * wants. We consider this a configuration error: a larger
	 * chunksize should be used in that case.
	 */
	{
		int stripe = mddev->raid_disks * mddev->chunk_size / PAGE_SIZE;
		if (mddev->queue->backing_dev_info.ra_pages < 2* stripe)
			mddev->queue->backing_dev_info.ra_pages = 2* stripe;
	}


	blk_queue_merge_bvec(mddev->queue, raid0_mergeable_bvec);
	return 0;

out_free_conf:
	kfree(conf->strip_zone);
	kfree(conf->devlist);
	kfree(conf);
	mddev->private = NULL;
out:
	return -ENOMEM;
}

static int raid0_stop (mddev_t *mddev)
{
	raid0_conf_t *conf = mddev_to_conf(mddev);

	blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
	kfree(conf->hash_table);
	conf->hash_table = NULL;
	kfree(conf->strip_zone);
	conf->strip_zone = NULL;
	kfree(conf);
	mddev->private = NULL;

	return 0;
}

static int raid0_make_request (struct request_queue *q, struct bio *bio)
{
	mddev_t *mddev = q->queuedata;
	unsigned int sect_in_chunk, chunksect_bits, chunk_sects;
	raid0_conf_t *conf = mddev_to_conf(mddev);
	struct strip_zone *zone;
	mdk_rdev_t *tmp_dev;
	sector_t chunk;
	sector_t sector, rsect;
	const int rw = bio_data_dir(bio);
	int cpu;

	if (unlikely(bio_barrier(bio))) {
		bio_endio(bio, -EOPNOTSUPP);
		return 0;
	}

	cpu = part_stat_lock();
	part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
	part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw],
		      bio_sectors(bio));
	part_stat_unlock();

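	/* chunk_sects is assumed to be a power of two (the masks below rely
	 * on this), so ffz(~chunk_sects) -- the index of its lowest set bit
	 * -- is log2 of the chunk size in sectors.
	 */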
	chunk_sects = mddev->chunk_size >> 9;
	chunksect_bits = ffz(~chunk_sects);
	sector = bio->bi_sector;

	if (unlikely(chunk_sects < (bio->bi_sector & (chunk_sects - 1)) + (bio->bi_size >> 9))) {
		struct bio_pair *bp;
		/* Sanity check -- queue functions should prevent this happening */
		if (bio->bi_vcnt != 1 ||
		    bio->bi_idx != 0)
			goto bad_map;
		/* This is a one page bio that upper layers
		 * refuse to split for us, so we need to split it.
		 */
		bp = bio_split(bio, chunk_sects - (bio->bi_sector & (chunk_sects - 1)));
		if (raid0_make_request(q, &bp->bio1))
			generic_make_request(&bp->bio1);
		if (raid0_make_request(q, &bp->bio2))
			generic_make_request(&bp->bio2);

		bio_pair_release(bp);
		return 0;
	}

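	/* Locate the strip_zone for this sector: the hash table points at a
	 * zone starting at or before the target offset, then we step forward
	 * to the zone that actually contains it.
	 */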
	{
		sector_t x = sector >> (conf->preshift + 1);
		sector_div(x, (u32)conf->hash_spacing);
		zone = conf->hash_table[x];
	}

	while (sector / 2 >= (zone->zone_start / 2 + zone->size))
		zone++;

	sect_in_chunk = bio->bi_sector & (chunk_sects - 1);

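	/* Map the array sector to a member device and an offset on it:
	 * chunk is the index of this chunk within the zone on its member,
	 * the absolute chunk number modulo nb_dev selects the member, and
	 * rsect is the resulting sector on that member device.
	 */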
	{
		sector_t x = (sector - zone->zone_start) >> chunksect_bits;

		sector_div(x, zone->nb_dev);
		chunk = x;

		x = sector >> chunksect_bits;
		tmp_dev = zone->dev[sector_div(x, zone->nb_dev)];
	}
	rsect = (chunk << chunksect_bits) + zone->dev_start + sect_in_chunk;

	bio->bi_bdev = tmp_dev->bdev;
	bio->bi_sector = rsect + tmp_dev->data_offset;

	/*
	 * Let the main block layer submit the IO and resolve recursion:
	 */
	return 1;

bad_map:
	printk("raid0_make_request bug: can't convert block across chunks"
		" or bigger than %dk %llu %d\n", chunk_sects / 2,
		(unsigned long long)bio->bi_sector, bio->bi_size >> 10);

	bio_io_error(bio);
	return 0;
}

static void raid0_status (struct seq_file *seq, mddev_t *mddev)
{
#undef MD_DEBUG
#ifdef MD_DEBUG
	int j, k, h;
	char b[BDEVNAME_SIZE];
	raid0_conf_t *conf = mddev_to_conf(mddev);

	h = 0;
	for (j = 0; j < conf->nr_strip_zones; j++) {
		seq_printf(seq, " z%d", j);
		if (conf->hash_table[h] == conf->strip_zone+j)
			seq_printf(seq, "(h%d)", h++);
		seq_printf(seq, "=[");
		for (k = 0; k < conf->strip_zone[j].nb_dev; k++)
			seq_printf(seq, "%s/", bdevname(
				conf->strip_zone[j].dev[k]->bdev,b));

		seq_printf(seq, "] zs=%d ds=%d s=%d\n",
			conf->strip_zone[j].zone_start,
			conf->strip_zone[j].dev_start,
			conf->strip_zone[j].size);
	}
#endif
	seq_printf(seq, " %dk chunks", mddev->chunk_size/1024);
	return;
}

static struct mdk_personality raid0_personality=
{
	.name		= "raid0",
	.level		= 0,
	.owner		= THIS_MODULE,
	.make_request	= raid0_make_request,
	.run		= raid0_run,
	.stop		= raid0_stop,
	.status		= raid0_status,
};

static int __init raid0_init (void)
{
	return register_md_personality (&raid0_personality);
}

static void raid0_exit (void)
{
	unregister_md_personality (&raid0_personality);
}

module_init(raid0_init);
module_exit(raid0_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS("md-personality-2"); /* RAID0 */
MODULE_ALIAS("md-raid0");
MODULE_ALIAS("md-level-0");